aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorKalle Valo <kvalo@qca.qualcomm.com>2011-12-16 14:10:39 -0500
committerKalle Valo <kvalo@qca.qualcomm.com>2011-12-16 14:10:39 -0500
commit7e95e365d5399647a41e10059e4b09826b82d78b (patch)
tree305c9968798adae3d9484657339fa39d2a5fdaac /drivers/net
parent3ca9d1fc9aa64077645a26c396de9399b49ea226 (diff)
parent5bd5e9a6ae5137a61d0b5c277eac61892d89fc4f (diff)
Merge remote branch 'wireless-next/master' into ath6kl-next
Conflicts: drivers/net/wireless/ath/ath6kl/init.c
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/bonding/bond_main.c37
-rw-r--r--drivers/net/bonding/bond_procfs.c13
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c48
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.h2
-rw-r--r--drivers/net/ethernet/apple/Kconfig12
-rw-r--r--drivers/net/ethernet/apple/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c195
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h21
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c1
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c1
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig14
-rw-r--r--drivers/net/ethernet/cirrus/Makefile1
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c (renamed from drivers/net/ethernet/apple/mac89x0.c)0
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c67
-rw-r--r--drivers/net/ethernet/ethoc.c1
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c1
-rw-r--r--drivers/net/ethernet/i825xx/3c505.c6
-rw-r--r--drivers/net/ethernet/intel/Kconfig6
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c48
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c21
-rw-r--r--drivers/net/ethernet/natsemi/Kconfig5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c1
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c88
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c1
-rw-r--r--drivers/net/ethernet/realtek/Kconfig12
-rw-r--r--drivers/net/ethernet/realtek/Makefile1
-rw-r--r--drivers/net/ethernet/sfc/efx.c10
-rw-r--r--drivers/net/ethernet/sfc/efx.h4
-rw-r--r--drivers/net/ethernet/sfc/falcon.c3
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c3
-rw-r--r--drivers/net/ethernet/sfc/rx.c1
-rw-r--r--drivers/net/ethernet/silan/Kconfig33
-rw-r--r--drivers/net/ethernet/silan/Makefile5
-rw-r--r--drivers/net/ethernet/silan/sc92031.c (renamed from drivers/net/ethernet/realtek/sc92031.c)0
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig9
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c3
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h6
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c1
-rw-r--r--drivers/net/irda/Kconfig2
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/phy/realtek.c1
-rw-r--r--drivers/net/rionet.c4
-rw-r--r--drivers/net/usb/lg-vl600.c1
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/veth.c1
-rw-r--r--drivers/net/virtio_net.c14
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c1
-rw-r--r--drivers/net/wimax/i2400m/control.c2
-rw-r--r--drivers/net/wimax/i2400m/debugfs.c1
-rw-r--r--drivers/net/wimax/i2400m/fw.c1
-rw-r--r--drivers/net/wimax/i2400m/netdev.c1
-rw-r--r--drivers/net/wimax/i2400m/rx.c2
-rw-r--r--drivers/net/wimax/i2400m/sdio.c1
-rw-r--r--drivers/net/wimax/i2400m/tx.c1
-rw-r--r--drivers/net/wimax/i2400m/usb.c1
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/adm8211.c1
-rw-r--r--drivers/net/wireless/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/ath/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath.h11
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c91
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h32
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h569
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c16
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c287
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c27
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c217
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h124
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c370
-rw-r--r--drivers/net/wireless/ath/ath5k/gpio.c81
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c75
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c222
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c853
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c143
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c230
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h59
-rw-r--r--drivers/net/wireless/ath/ath5k/rfgain.h22
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig22
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c1464
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h102
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h42
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h31
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c215
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.h43
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h57
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c221
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h213
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c53
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c220
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c427
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h24
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h306
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c251
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c1
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c4
-rw-r--r--drivers/net/wireless/ath/debug.c1
-rw-r--r--drivers/net/wireless/ath/hw.c1
-rw-r--r--drivers/net/wireless/ath/key.c1
-rw-r--r--drivers/net/wireless/ath/regd.c78
-rw-r--r--drivers/net/wireless/b43/b43.h12
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/b43/pcmcia.c1
-rw-r--r--drivers/net/wireless/b43/phy_n.c312
-rw-r--r--drivers/net/wireless/b43/phy_n.h8
-rw-r--r--drivers/net/wireless/b43/radio_2056.c25
-rw-r--r--drivers/net/wireless/b43/radio_2056.h1
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c77
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h31
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h16
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h25
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c146
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c214
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h33
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h19
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c85
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c322
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c74
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h31
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c1239
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h148
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/d11.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c289
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.h9
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c208
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c749
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h10
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.c241
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.h11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/otp.c76
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c231
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c69
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c84
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.c270
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.c50
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/types.h54
-rw-r--r--drivers/net/wireless/brcm80211/include/chipcommon.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c1
-rw-r--r--drivers/net/wireless/iwlegacy/3945-debug.c505
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c3977
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c995
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2751
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h626
-rw-r--r--drivers/net/wireless/iwlegacy/4965-calib.c (renamed from drivers/net/wireless/iwlegacy/iwl-4965-calib.c)613
-rw-r--r--drivers/net/wireless/iwlegacy/4965-debug.c746
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c6536
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2860
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c2421
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h1309
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig43
-rw-r--r--drivers/net/wireless/iwlegacy/Makefile24
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h (renamed from drivers/net/wireless/iwlegacy/iwl-commands.h)1134
-rw-r--r--drivers/net/wireless/iwlegacy/common.c5706
-rw-r--r--drivers/net/wireless/iwlegacy/common.h3424
-rw-r--r--drivers/net/wireless/iwlegacy/csr.h (renamed from drivers/net/wireless/iwlegacy/iwl-csr.h)93
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c1411
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c523
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-fh.h187
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h291
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c63
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.h32
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c996
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c2741
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.h308
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.h75
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c774
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h59
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c154
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h811
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c73
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.h33
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c1194
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c2871
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c215
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-sta.c721
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c1378
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-ucode.c166
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c2183
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.h282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2661
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h636
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debug.h198
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c1313
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h1364
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c42
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h210
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c553
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.h344
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h513
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c271
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h196
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-io.h545
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c205
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.h56
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-legacy-rs.h456
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.c165
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.h55
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c281
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c549
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-spectrum.h4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c541
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.h148
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c658
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c4016
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c3281
-rw-r--r--drivers/net/wireless/iwlegacy/prph.h (renamed from drivers/net/wireless/iwlegacy/iwl-prph.h)133
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig30
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c72
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h99
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c41
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-shared.h93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c (renamed from drivers/net/wireless/iwlwifi/iwl-sv-open.c)251
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c31
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-ucode.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-ucode.c)229
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c1
-rw-r--r--drivers/net/wireless/libertas/cfg.c2
-rw-r--r--drivers/net/wireless/libertas/cmd.c1
-rw-r--r--drivers/net/wireless/libertas/debugfs.c1
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c4
-rw-r--r--drivers/net/wireless/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/libertas/main.c2
-rw-r--r--drivers/net/wireless/libertas/rx.c1
-rw-r--r--drivers/net/wireless/libertas/tx.c1
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c1
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c22
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c197
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.h1
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c9
-rw-r--r--drivers/net/wireless/mwifiex/init.c40
-rw-r--r--drivers/net/wireless/mwifiex/main.c13
-rw-r--r--drivers/net/wireless/mwifiex/main.h18
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c20
-rw-r--r--drivers/net/wireless/mwifiex/scan.c19
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c22
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c23
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c4
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c5
-rw-r--r--drivers/net/wireless/orinoco/fw.c1
-rw-r--r--drivers/net/wireless/p54/eeprom.c1
-rw-r--r--drivers/net/wireless/p54/fwio.c1
-rw-r--r--drivers/net/wireless/p54/main.c1
-rw-r--r--drivers/net/wireless/p54/p54pci.c1
-rw-r--r--drivers/net/wireless/p54/p54spi.c6
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c5
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c335
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c1
-rw-r--r--drivers/net/wireless/rndis_wlan.c86
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c26
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c1
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig4
-rw-r--r--drivers/net/wireless/rtlwifi/base.c10
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c1
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c1
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c37
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c63
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/main.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c56
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h13
-rw-r--r--drivers/net/wireless/wl1251/spi.c1
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig10
-rw-r--r--drivers/net/wireless/wl12xx/Makefile3
-rw-r--r--drivers/net/wireless/wl12xx/acx.c172
-rw-r--r--drivers/net/wireless/wl12xx/acx.h91
-rw-r--r--drivers/net/wireless/wl12xx/boot.c16
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c368
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h50
-rw-r--r--drivers/net/wireless/wl12xx/conf.h4
-rw-r--r--drivers/net/wireless/wl12xx/debug.h101
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c157
-rw-r--r--drivers/net/wireless/wl12xx/event.c214
-rw-r--r--drivers/net/wireless/wl12xx/event.h3
-rw-r--r--drivers/net/wireless/wl12xx/init.c491
-rw-r--r--drivers/net/wireless/wl12xx/init.h8
-rw-r--r--drivers/net/wireless/wl12xx/io.c12
-rw-r--r--drivers/net/wireless/wl12xx/io.h23
-rw-r--r--drivers/net/wireless/wl12xx/main.c1966
-rw-r--r--drivers/net/wireless/wl12xx/ps.c56
-rw-r--r--drivers/net/wireless/wl12xx/ps.h9
-rw-r--r--drivers/net/wireless/wl12xx/reg.h2
-rw-r--r--drivers/net/wireless/wl12xx/rx.c38
-rw-r--r--drivers/net/wireless/wl12xx/scan.c102
-rw-r--r--drivers/net/wireless/wl12xx/scan.h8
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c259
-rw-r--r--drivers/net/wireless/wl12xx/sdio_test.c543
-rw-r--r--drivers/net/wireless/wl12xx/spi.c215
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c77
-rw-r--r--drivers/net/wireless/wl12xx/tx.c382
-rw-r--r--drivers/net/wireless/wl12xx/tx.h11
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h384
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h5
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_platform_data.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/net/xen-netback/common.h11
-rw-r--r--drivers/net/xen-netback/netback.c80
447 files changed, 48004 insertions, 45289 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c34cc1e7c6f6..b0c577256487 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -550,7 +550,7 @@ down:
550/* 550/*
551 * Get link speed and duplex from the slave's base driver 551 * Get link speed and duplex from the slave's base driver
552 * using ethtool. If for some reason the call fails or the 552 * using ethtool. If for some reason the call fails or the
553 * values are invalid, fake speed and duplex to 100/Full 553 * values are invalid, set speed and duplex to -1,
554 * and return error. 554 * and return error.
555 */ 555 */
556static int bond_update_speed_duplex(struct slave *slave) 556static int bond_update_speed_duplex(struct slave *slave)
@@ -560,9 +560,8 @@ static int bond_update_speed_duplex(struct slave *slave)
560 u32 slave_speed; 560 u32 slave_speed;
561 int res; 561 int res;
562 562
563 /* Fake speed and duplex */ 563 slave->speed = SPEED_UNKNOWN;
564 slave->speed = SPEED_100; 564 slave->duplex = DUPLEX_UNKNOWN;
565 slave->duplex = DUPLEX_FULL;
566 565
567 res = __ethtool_get_settings(slave_dev, &ecmd); 566 res = __ethtool_get_settings(slave_dev, &ecmd);
568 if (res < 0) 567 if (res < 0)
@@ -1751,16 +1750,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1751 new_slave->link = BOND_LINK_DOWN; 1750 new_slave->link = BOND_LINK_DOWN;
1752 } 1751 }
1753 1752
1754 if (bond_update_speed_duplex(new_slave) && 1753 bond_update_speed_duplex(new_slave);
1755 (new_slave->link != BOND_LINK_DOWN)) {
1756 pr_warning("%s: Warning: failed to get speed and duplex from %s, assumed to be 100Mb/sec and Full.\n",
1757 bond_dev->name, new_slave->dev->name);
1758
1759 if (bond->params.mode == BOND_MODE_8023AD) {
1760 pr_warning("%s: Warning: Operation of 802.3ad mode requires ETHTOOL support in base driver for proper aggregator selection.\n",
1761 bond_dev->name);
1762 }
1763 }
1764 1754
1765 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { 1755 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
1766 /* if there is a primary slave, remember it */ 1756 /* if there is a primary slave, remember it */
@@ -3220,6 +3210,7 @@ static int bond_slave_netdev_event(unsigned long event,
3220{ 3210{
3221 struct net_device *bond_dev = slave_dev->master; 3211 struct net_device *bond_dev = slave_dev->master;
3222 struct bonding *bond = netdev_priv(bond_dev); 3212 struct bonding *bond = netdev_priv(bond_dev);
3213 struct slave *slave = NULL;
3223 3214
3224 switch (event) { 3215 switch (event) {
3225 case NETDEV_UNREGISTER: 3216 case NETDEV_UNREGISTER:
@@ -3230,20 +3221,16 @@ static int bond_slave_netdev_event(unsigned long event,
3230 bond_release(bond_dev, slave_dev); 3221 bond_release(bond_dev, slave_dev);
3231 } 3222 }
3232 break; 3223 break;
3224 case NETDEV_UP:
3233 case NETDEV_CHANGE: 3225 case NETDEV_CHANGE:
3234 if (bond->params.mode == BOND_MODE_8023AD || bond_is_lb(bond)) { 3226 slave = bond_get_slave_by_dev(bond, slave_dev);
3235 struct slave *slave; 3227 if (slave) {
3228 u32 old_speed = slave->speed;
3229 u8 old_duplex = slave->duplex;
3236 3230
3237 slave = bond_get_slave_by_dev(bond, slave_dev); 3231 bond_update_speed_duplex(slave);
3238 if (slave) {
3239 u32 old_speed = slave->speed;
3240 u8 old_duplex = slave->duplex;
3241
3242 bond_update_speed_duplex(slave);
3243
3244 if (bond_is_lb(bond))
3245 break;
3246 3232
3233 if (bond->params.mode == BOND_MODE_8023AD) {
3247 if (old_speed != slave->speed) 3234 if (old_speed != slave->speed)
3248 bond_3ad_adapter_speed_changed(slave); 3235 bond_3ad_adapter_speed_changed(slave);
3249 if (old_duplex != slave->duplex) 3236 if (old_duplex != slave->duplex)
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 95de93b90386..ad284baafe87 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -1,4 +1,5 @@
1#include <linux/proc_fs.h> 1#include <linux/proc_fs.h>
2#include <linux/export.h>
2#include <net/net_namespace.h> 3#include <net/net_namespace.h>
3#include <net/netns/generic.h> 4#include <net/netns/generic.h>
4#include "bonding.h" 5#include "bonding.h"
@@ -157,8 +158,16 @@ static void bond_info_show_slave(struct seq_file *seq,
157 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); 158 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
158 seq_printf(seq, "MII Status: %s\n", 159 seq_printf(seq, "MII Status: %s\n",
159 (slave->link == BOND_LINK_UP) ? "up" : "down"); 160 (slave->link == BOND_LINK_UP) ? "up" : "down");
160 seq_printf(seq, "Speed: %d Mbps\n", slave->speed); 161 if (slave->speed == SPEED_UNKNOWN)
161 seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half"); 162 seq_printf(seq, "Speed: %s\n", "Unknown");
163 else
164 seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
165
166 if (slave->duplex == DUPLEX_UNKNOWN)
167 seq_printf(seq, "Duplex: %s\n", "Unknown");
168 else
169 seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
170
162 seq_printf(seq, "Link Failure Count: %u\n", 171 seq_printf(seq, "Link Failure Count: %u\n",
163 slave->link_failure_count); 172 slave->link_failure_count);
164 173
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 6dff5a0e733f..597f4d45c632 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -159,6 +159,7 @@ config S6GMAC
159 will be called s6gmac. 159 will be called s6gmac.
160 160
161source "drivers/net/ethernet/seeq/Kconfig" 161source "drivers/net/ethernet/seeq/Kconfig"
162source "drivers/net/ethernet/silan/Kconfig"
162source "drivers/net/ethernet/sis/Kconfig" 163source "drivers/net/ethernet/sis/Kconfig"
163source "drivers/net/ethernet/sfc/Kconfig" 164source "drivers/net/ethernet/sfc/Kconfig"
164source "drivers/net/ethernet/sgi/Kconfig" 165source "drivers/net/ethernet/sgi/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c53ad3afc991..be5dde040261 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_SH_ETH) += renesas/
58obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ 58obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
59obj-$(CONFIG_S6GMAC) += s6gmac.o 59obj-$(CONFIG_S6GMAC) += s6gmac.o
60obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ 60obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
61obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
61obj-$(CONFIG_NET_VENDOR_SIS) += sis/ 62obj-$(CONFIG_NET_VENDOR_SIS) += sis/
62obj-$(CONFIG_SFC) += sfc/ 63obj-$(CONFIG_SFC) += sfc/
63obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ 64obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 82386677bb8c..4865ff14bebf 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -541,19 +541,17 @@ static void au1000_reset_mac(struct net_device *dev)
541 * these are not descriptors sitting in memory. 541 * these are not descriptors sitting in memory.
542 */ 542 */
543static void 543static void
544au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base) 544au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
545{ 545{
546 int i; 546 int i;
547 547
548 for (i = 0; i < NUM_RX_DMA; i++) { 548 for (i = 0; i < NUM_RX_DMA; i++) {
549 aup->rx_dma_ring[i] = 549 aup->rx_dma_ring[i] = (struct rx_dma *)
550 (struct rx_dma *) 550 (tx_base + 0x100 + sizeof(struct rx_dma) * i);
551 (rx_base + sizeof(struct rx_dma)*i);
552 } 551 }
553 for (i = 0; i < NUM_TX_DMA; i++) { 552 for (i = 0; i < NUM_TX_DMA; i++) {
554 aup->tx_dma_ring[i] = 553 aup->tx_dma_ring[i] = (struct tx_dma *)
555 (struct tx_dma *) 554 (tx_base + sizeof(struct tx_dma) * i);
556 (tx_base + sizeof(struct tx_dma)*i);
557 } 555 }
558} 556}
559 557
@@ -1026,7 +1024,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1026 struct net_device *dev = NULL; 1024 struct net_device *dev = NULL;
1027 struct db_dest *pDB, *pDBfree; 1025 struct db_dest *pDB, *pDBfree;
1028 int irq, i, err = 0; 1026 int irq, i, err = 0;
1029 struct resource *base, *macen; 1027 struct resource *base, *macen, *macdma;
1030 1028
1031 base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1029 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1032 if (!base) { 1030 if (!base) {
@@ -1049,6 +1047,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1049 goto out; 1047 goto out;
1050 } 1048 }
1051 1049
1050 macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1051 if (!macdma) {
1052 dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
1053 err = -ENODEV;
1054 goto out;
1055 }
1056
1052 if (!request_mem_region(base->start, resource_size(base), 1057 if (!request_mem_region(base->start, resource_size(base),
1053 pdev->name)) { 1058 pdev->name)) {
1054 dev_err(&pdev->dev, "failed to request memory region for base registers\n"); 1059 dev_err(&pdev->dev, "failed to request memory region for base registers\n");
@@ -1063,6 +1068,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1063 goto err_request; 1068 goto err_request;
1064 } 1069 }
1065 1070
1071 if (!request_mem_region(macdma->start, resource_size(macdma),
1072 pdev->name)) {
1073 dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
1074 err = -ENXIO;
1075 goto err_macdma;
1076 }
1077
1066 dev = alloc_etherdev(sizeof(struct au1000_private)); 1078 dev = alloc_etherdev(sizeof(struct au1000_private));
1067 if (!dev) { 1079 if (!dev) {
1068 dev_err(&pdev->dev, "alloc_etherdev failed\n"); 1080 dev_err(&pdev->dev, "alloc_etherdev failed\n");
@@ -1109,10 +1121,14 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1109 } 1121 }
1110 aup->mac_id = pdev->id; 1122 aup->mac_id = pdev->id;
1111 1123
1112 if (pdev->id == 0) 1124 aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
1113 au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); 1125 if (!aup->macdma) {
1114 else if (pdev->id == 1) 1126 dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
1115 au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); 1127 err = -ENXIO;
1128 goto err_remap3;
1129 }
1130
1131 au1000_setup_hw_rings(aup, aup->macdma);
1116 1132
1117 /* set a random MAC now in case platform_data doesn't provide one */ 1133 /* set a random MAC now in case platform_data doesn't provide one */
1118 random_ether_addr(dev->dev_addr); 1134 random_ether_addr(dev->dev_addr);
@@ -1252,6 +1268,8 @@ err_out:
1252err_mdiobus_reg: 1268err_mdiobus_reg:
1253 mdiobus_free(aup->mii_bus); 1269 mdiobus_free(aup->mii_bus);
1254err_mdiobus_alloc: 1270err_mdiobus_alloc:
1271 iounmap(aup->macdma);
1272err_remap3:
1255 iounmap(aup->enable); 1273 iounmap(aup->enable);
1256err_remap2: 1274err_remap2:
1257 iounmap(aup->mac); 1275 iounmap(aup->mac);
@@ -1261,6 +1279,8 @@ err_remap1:
1261err_vaddr: 1279err_vaddr:
1262 free_netdev(dev); 1280 free_netdev(dev);
1263err_alloc: 1281err_alloc:
1282 release_mem_region(macdma->start, resource_size(macdma));
1283err_macdma:
1264 release_mem_region(macen->start, resource_size(macen)); 1284 release_mem_region(macen->start, resource_size(macen));
1265err_request: 1285err_request:
1266 release_mem_region(base->start, resource_size(base)); 1286 release_mem_region(base->start, resource_size(base));
@@ -1293,9 +1313,13 @@ static int __devexit au1000_remove(struct platform_device *pdev)
1293 (NUM_TX_BUFFS + NUM_RX_BUFFS), 1313 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1294 (void *)aup->vaddr, aup->dma_addr); 1314 (void *)aup->vaddr, aup->dma_addr);
1295 1315
1316 iounmap(aup->macdma);
1296 iounmap(aup->mac); 1317 iounmap(aup->mac);
1297 iounmap(aup->enable); 1318 iounmap(aup->enable);
1298 1319
1320 base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1321 release_mem_region(base->start, resource_size(base));
1322
1299 base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1323 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1300 release_mem_region(base->start, resource_size(base)); 1324 release_mem_region(base->start, resource_size(base));
1301 1325
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index 6229c774552c..4b7f7ad62bb8 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -124,7 +124,7 @@ struct au1000_private {
124 */ 124 */
125 struct mac_reg *mac; /* mac registers */ 125 struct mac_reg *mac; /* mac registers */
126 u32 *enable; /* address of MAC Enable Register */ 126 u32 *enable; /* address of MAC Enable Register */
127 127 void __iomem *macdma; /* base of MAC DMA port */
128 u32 vaddr; /* virtual address of rx/tx buffers */ 128 u32 vaddr; /* virtual address of rx/tx buffers */
129 dma_addr_t dma_addr; /* dma address of rx/tx buffers */ 129 dma_addr_t dma_addr; /* dma address of rx/tx buffers */
130 130
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index a759d5483ab9..1375e2dc9468 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -52,18 +52,6 @@ config BMAC
52 To compile this driver as a module, choose M here: the module 52 To compile this driver as a module, choose M here: the module
53 will be called bmac. 53 will be called bmac.
54 54
55config MAC89x0
56 tristate "Macintosh CS89x0 based ethernet cards"
57 depends on MAC
58 ---help---
59 Support for CS89x0 chipset based Ethernet cards. If you have a
60 Nubus or LC-PDS network (Ethernet) card of this type, say Y and
61 read the Ethernet-HOWTO, available from
62 <http://www.tldp.org/docs.html#howto>.
63
64 To compile this driver as a module, choose M here. This module will
65 be called mac89x0.
66
67config MACMACE 55config MACMACE
68 bool "Macintosh (AV) onboard MACE ethernet" 56 bool "Macintosh (AV) onboard MACE ethernet"
69 depends on MAC 57 depends on MAC
diff --git a/drivers/net/ethernet/apple/Makefile b/drivers/net/ethernet/apple/Makefile
index 0d3a5919c95b..86eaa17af0f4 100644
--- a/drivers/net/ethernet/apple/Makefile
+++ b/drivers/net/ethernet/apple/Makefile
@@ -4,5 +4,4 @@
4 4
5obj-$(CONFIG_MACE) += mace.o 5obj-$(CONFIG_MACE) += mace.o
6obj-$(CONFIG_BMAC) += bmac.o 6obj-$(CONFIG_BMAC) += bmac.o
7obj-$(CONFIG_MAC89x0) += mac89x0.o
8obj-$(CONFIG_MACMACE) += macmace.o 7obj-$(CONFIG_MACMACE) += macmace.o
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 161cbbb4814a..bf4074167d6a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
89 89
90#define DRV_MODULE_NAME "tg3" 90#define DRV_MODULE_NAME "tg3"
91#define TG3_MAJ_NUM 3 91#define TG3_MAJ_NUM 3
92#define TG3_MIN_NUM 120 92#define TG3_MIN_NUM 121
93#define DRV_MODULE_VERSION \ 93#define DRV_MODULE_VERSION \
94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95#define DRV_MODULE_RELDATE "August 18, 2011" 95#define DRV_MODULE_RELDATE "November 2, 2011"
96 96
97#define RESET_KIND_SHUTDOWN 0 97#define RESET_KIND_SHUTDOWN 0
98#define RESET_KIND_INIT 1 98#define RESET_KIND_INIT 1
@@ -628,19 +628,23 @@ static void tg3_ape_lock_init(struct tg3 *tp)
628 regbase = TG3_APE_PER_LOCK_GRANT; 628 regbase = TG3_APE_PER_LOCK_GRANT;
629 629
630 /* Make sure the driver hasn't any stale locks. */ 630 /* Make sure the driver hasn't any stale locks. */
631 for (i = 0; i < 8; i++) { 631 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632 if (i == TG3_APE_LOCK_GPIO) 632 switch (i) {
633 continue; 633 case TG3_APE_LOCK_PHY0:
634 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER); 634 case TG3_APE_LOCK_PHY1:
635 case TG3_APE_LOCK_PHY2:
636 case TG3_APE_LOCK_PHY3:
637 bit = APE_LOCK_GRANT_DRIVER;
638 break;
639 default:
640 if (!tp->pci_fn)
641 bit = APE_LOCK_GRANT_DRIVER;
642 else
643 bit = 1 << tp->pci_fn;
644 }
645 tg3_ape_write32(tp, regbase + 4 * i, bit);
635 } 646 }
636 647
637 /* Clear the correct bit of the GPIO lock too. */
638 if (!tp->pci_fn)
639 bit = APE_LOCK_GRANT_DRIVER;
640 else
641 bit = 1 << tp->pci_fn;
642
643 tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
644} 648}
645 649
646static int tg3_ape_lock(struct tg3 *tp, int locknum) 650static int tg3_ape_lock(struct tg3 *tp, int locknum)
@@ -658,6 +662,10 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
658 return 0; 662 return 0;
659 case TG3_APE_LOCK_GRC: 663 case TG3_APE_LOCK_GRC:
660 case TG3_APE_LOCK_MEM: 664 case TG3_APE_LOCK_MEM:
665 if (!tp->pci_fn)
666 bit = APE_LOCK_REQ_DRIVER;
667 else
668 bit = 1 << tp->pci_fn;
661 break; 669 break;
662 default: 670 default:
663 return -EINVAL; 671 return -EINVAL;
@@ -673,11 +681,6 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
673 681
674 off = 4 * locknum; 682 off = 4 * locknum;
675 683
676 if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
677 bit = APE_LOCK_REQ_DRIVER;
678 else
679 bit = 1 << tp->pci_fn;
680
681 tg3_ape_write32(tp, req + off, bit); 684 tg3_ape_write32(tp, req + off, bit);
682 685
683 /* Wait for up to 1 millisecond to acquire lock. */ 686 /* Wait for up to 1 millisecond to acquire lock. */
@@ -710,6 +713,10 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
710 return; 713 return;
711 case TG3_APE_LOCK_GRC: 714 case TG3_APE_LOCK_GRC:
712 case TG3_APE_LOCK_MEM: 715 case TG3_APE_LOCK_MEM:
716 if (!tp->pci_fn)
717 bit = APE_LOCK_GRANT_DRIVER;
718 else
719 bit = 1 << tp->pci_fn;
713 break; 720 break;
714 default: 721 default:
715 return; 722 return;
@@ -720,11 +727,6 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
720 else 727 else
721 gnt = TG3_APE_PER_LOCK_GRANT; 728 gnt = TG3_APE_PER_LOCK_GRANT;
722 729
723 if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
724 bit = APE_LOCK_GRANT_DRIVER;
725 else
726 bit = 1 << tp->pci_fn;
727
728 tg3_ape_write32(tp, gnt + 4 * locknum, bit); 730 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
729} 731}
730 732
@@ -5927,6 +5929,18 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5927 return work_done; 5929 return work_done;
5928} 5930}
5929 5931
5932static inline void tg3_reset_task_schedule(struct tg3 *tp)
5933{
5934 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5935 schedule_work(&tp->reset_task);
5936}
5937
5938static inline void tg3_reset_task_cancel(struct tg3 *tp)
5939{
5940 cancel_work_sync(&tp->reset_task);
5941 tg3_flag_clear(tp, RESET_TASK_PENDING);
5942}
5943
5930static int tg3_poll_msix(struct napi_struct *napi, int budget) 5944static int tg3_poll_msix(struct napi_struct *napi, int budget)
5931{ 5945{
5932 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); 5946 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
@@ -5967,7 +5981,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
5967tx_recovery: 5981tx_recovery:
5968 /* work_done is guaranteed to be less than budget. */ 5982 /* work_done is guaranteed to be less than budget. */
5969 napi_complete(napi); 5983 napi_complete(napi);
5970 schedule_work(&tp->reset_task); 5984 tg3_reset_task_schedule(tp);
5971 return work_done; 5985 return work_done;
5972} 5986}
5973 5987
@@ -6002,7 +6016,7 @@ static void tg3_process_error(struct tg3 *tp)
6002 tg3_dump_state(tp); 6016 tg3_dump_state(tp);
6003 6017
6004 tg3_flag_set(tp, ERROR_PROCESSED); 6018 tg3_flag_set(tp, ERROR_PROCESSED);
6005 schedule_work(&tp->reset_task); 6019 tg3_reset_task_schedule(tp);
6006} 6020}
6007 6021
6008static int tg3_poll(struct napi_struct *napi, int budget) 6022static int tg3_poll(struct napi_struct *napi, int budget)
@@ -6049,7 +6063,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
6049tx_recovery: 6063tx_recovery:
6050 /* work_done is guaranteed to be less than budget. */ 6064 /* work_done is guaranteed to be less than budget. */
6051 napi_complete(napi); 6065 napi_complete(napi);
6052 schedule_work(&tp->reset_task); 6066 tg3_reset_task_schedule(tp);
6053 return work_done; 6067 return work_done;
6054} 6068}
6055 6069
@@ -6338,11 +6352,11 @@ static void tg3_reset_task(struct work_struct *work)
6338{ 6352{
6339 struct tg3 *tp = container_of(work, struct tg3, reset_task); 6353 struct tg3 *tp = container_of(work, struct tg3, reset_task);
6340 int err; 6354 int err;
6341 unsigned int restart_timer;
6342 6355
6343 tg3_full_lock(tp, 0); 6356 tg3_full_lock(tp, 0);
6344 6357
6345 if (!netif_running(tp->dev)) { 6358 if (!netif_running(tp->dev)) {
6359 tg3_flag_clear(tp, RESET_TASK_PENDING);
6346 tg3_full_unlock(tp); 6360 tg3_full_unlock(tp);
6347 return; 6361 return;
6348 } 6362 }
@@ -6355,9 +6369,6 @@ static void tg3_reset_task(struct work_struct *work)
6355 6369
6356 tg3_full_lock(tp, 1); 6370 tg3_full_lock(tp, 1);
6357 6371
6358 restart_timer = tg3_flag(tp, RESTART_TIMER);
6359 tg3_flag_clear(tp, RESTART_TIMER);
6360
6361 if (tg3_flag(tp, TX_RECOVERY_PENDING)) { 6372 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6362 tp->write32_tx_mbox = tg3_write32_tx_mbox; 6373 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6363 tp->write32_rx_mbox = tg3_write_flush_reg32; 6374 tp->write32_rx_mbox = tg3_write_flush_reg32;
@@ -6372,14 +6383,13 @@ static void tg3_reset_task(struct work_struct *work)
6372 6383
6373 tg3_netif_start(tp); 6384 tg3_netif_start(tp);
6374 6385
6375 if (restart_timer)
6376 mod_timer(&tp->timer, jiffies + 1);
6377
6378out: 6386out:
6379 tg3_full_unlock(tp); 6387 tg3_full_unlock(tp);
6380 6388
6381 if (!err) 6389 if (!err)
6382 tg3_phy_start(tp); 6390 tg3_phy_start(tp);
6391
6392 tg3_flag_clear(tp, RESET_TASK_PENDING);
6383} 6393}
6384 6394
6385static void tg3_tx_timeout(struct net_device *dev) 6395static void tg3_tx_timeout(struct net_device *dev)
@@ -6391,7 +6401,7 @@ static void tg3_tx_timeout(struct net_device *dev)
6391 tg3_dump_state(tp); 6401 tg3_dump_state(tp);
6392 } 6402 }
6393 6403
6394 schedule_work(&tp->reset_task); 6404 tg3_reset_task_schedule(tp);
6395} 6405}
6396 6406
6397/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ 6407/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
@@ -6442,31 +6452,26 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6442 hwbug = 1; 6452 hwbug = 1;
6443 6453
6444 if (tg3_flag(tp, 4K_FIFO_LIMIT)) { 6454 if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6455 u32 prvidx = *entry;
6445 u32 tmp_flag = flags & ~TXD_FLAG_END; 6456 u32 tmp_flag = flags & ~TXD_FLAG_END;
6446 while (len > TG3_TX_BD_DMA_MAX) { 6457 while (len > TG3_TX_BD_DMA_MAX && *budget) {
6447 u32 frag_len = TG3_TX_BD_DMA_MAX; 6458 u32 frag_len = TG3_TX_BD_DMA_MAX;
6448 len -= TG3_TX_BD_DMA_MAX; 6459 len -= TG3_TX_BD_DMA_MAX;
6449 6460
6450 if (len) { 6461 /* Avoid the 8byte DMA problem */
6451 tnapi->tx_buffers[*entry].fragmented = true; 6462 if (len <= 8) {
6452 /* Avoid the 8byte DMA problem */ 6463 len += TG3_TX_BD_DMA_MAX / 2;
6453 if (len <= 8) { 6464 frag_len = TG3_TX_BD_DMA_MAX / 2;
6454 len += TG3_TX_BD_DMA_MAX / 2;
6455 frag_len = TG3_TX_BD_DMA_MAX / 2;
6456 }
6457 } else
6458 tmp_flag = flags;
6459
6460 if (*budget) {
6461 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6462 frag_len, tmp_flag, mss, vlan);
6463 (*budget)--;
6464 *entry = NEXT_TX(*entry);
6465 } else {
6466 hwbug = 1;
6467 break;
6468 } 6465 }
6469 6466
6467 tnapi->tx_buffers[*entry].fragmented = true;
6468
6469 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6470 frag_len, tmp_flag, mss, vlan);
6471 *budget -= 1;
6472 prvidx = *entry;
6473 *entry = NEXT_TX(*entry);
6474
6470 map += frag_len; 6475 map += frag_len;
6471 } 6476 }
6472 6477
@@ -6474,10 +6479,11 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6474 if (*budget) { 6479 if (*budget) {
6475 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, 6480 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6476 len, flags, mss, vlan); 6481 len, flags, mss, vlan);
6477 (*budget)--; 6482 *budget -= 1;
6478 *entry = NEXT_TX(*entry); 6483 *entry = NEXT_TX(*entry);
6479 } else { 6484 } else {
6480 hwbug = 1; 6485 hwbug = 1;
6486 tnapi->tx_buffers[prvidx].fragmented = false;
6481 } 6487 }
6482 } 6488 }
6483 } else { 6489 } else {
@@ -6509,7 +6515,7 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6509 txb = &tnapi->tx_buffers[entry]; 6515 txb = &tnapi->tx_buffers[entry];
6510 } 6516 }
6511 6517
6512 for (i = 0; i < last; i++) { 6518 for (i = 0; i <= last; i++) {
6513 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6519 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6514 6520
6515 entry = NEXT_TX(entry); 6521 entry = NEXT_TX(entry);
@@ -6559,6 +6565,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6559 dev_kfree_skb(new_skb); 6565 dev_kfree_skb(new_skb);
6560 ret = -1; 6566 ret = -1;
6561 } else { 6567 } else {
6568 u32 save_entry = *entry;
6569
6562 base_flags |= TXD_FLAG_END; 6570 base_flags |= TXD_FLAG_END;
6563 6571
6564 tnapi->tx_buffers[*entry].skb = new_skb; 6572 tnapi->tx_buffers[*entry].skb = new_skb;
@@ -6568,7 +6576,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6568 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, 6576 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6569 new_skb->len, base_flags, 6577 new_skb->len, base_flags,
6570 mss, vlan)) { 6578 mss, vlan)) {
6571 tg3_tx_skb_unmap(tnapi, *entry, 0); 6579 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6572 dev_kfree_skb(new_skb); 6580 dev_kfree_skb(new_skb);
6573 ret = -1; 6581 ret = -1;
6574 } 6582 }
@@ -6758,11 +6766,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6758 6766
6759 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | 6767 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6760 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), 6768 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6761 mss, vlan)) 6769 mss, vlan)) {
6762 would_hit_hwbug = 1; 6770 would_hit_hwbug = 1;
6763
6764 /* Now loop through additional data fragments, and queue them. */ 6771 /* Now loop through additional data fragments, and queue them. */
6765 if (skb_shinfo(skb)->nr_frags > 0) { 6772 } else if (skb_shinfo(skb)->nr_frags > 0) {
6766 u32 tmp_mss = mss; 6773 u32 tmp_mss = mss;
6767 6774
6768 if (!tg3_flag(tp, HW_TSO_1) && 6775 if (!tg3_flag(tp, HW_TSO_1) &&
@@ -6784,11 +6791,14 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6784 if (dma_mapping_error(&tp->pdev->dev, mapping)) 6791 if (dma_mapping_error(&tp->pdev->dev, mapping))
6785 goto dma_error; 6792 goto dma_error;
6786 6793
6787 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, 6794 if (!budget ||
6795 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6788 len, base_flags | 6796 len, base_flags |
6789 ((i == last) ? TXD_FLAG_END : 0), 6797 ((i == last) ? TXD_FLAG_END : 0),
6790 tmp_mss, vlan)) 6798 tmp_mss, vlan)) {
6791 would_hit_hwbug = 1; 6799 would_hit_hwbug = 1;
6800 break;
6801 }
6792 } 6802 }
6793 } 6803 }
6794 6804
@@ -6828,7 +6838,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6828 return NETDEV_TX_OK; 6838 return NETDEV_TX_OK;
6829 6839
6830dma_error: 6840dma_error:
6831 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 6841 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6832 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; 6842 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6833drop: 6843drop:
6834 dev_kfree_skb(skb); 6844 dev_kfree_skb(skb);
@@ -7281,7 +7291,8 @@ static void tg3_free_rings(struct tg3 *tp)
7281 if (!skb) 7291 if (!skb)
7282 continue; 7292 continue;
7283 7293
7284 tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags); 7294 tg3_tx_skb_unmap(tnapi, i,
7295 skb_shinfo(skb)->nr_frags - 1);
7285 7296
7286 dev_kfree_skb_any(skb); 7297 dev_kfree_skb_any(skb);
7287 } 7298 }
@@ -9200,7 +9211,7 @@ static void tg3_timer(unsigned long __opaque)
9200{ 9211{
9201 struct tg3 *tp = (struct tg3 *) __opaque; 9212 struct tg3 *tp = (struct tg3 *) __opaque;
9202 9213
9203 if (tp->irq_sync) 9214 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9204 goto restart_timer; 9215 goto restart_timer;
9205 9216
9206 spin_lock(&tp->lock); 9217 spin_lock(&tp->lock);
@@ -9223,10 +9234,9 @@ static void tg3_timer(unsigned long __opaque)
9223 } 9234 }
9224 9235
9225 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 9236 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9226 tg3_flag_set(tp, RESTART_TIMER);
9227 spin_unlock(&tp->lock); 9237 spin_unlock(&tp->lock);
9228 schedule_work(&tp->reset_task); 9238 tg3_reset_task_schedule(tp);
9229 return; 9239 goto restart_timer;
9230 } 9240 }
9231 } 9241 }
9232 9242
@@ -9674,15 +9684,14 @@ static int tg3_open(struct net_device *dev)
9674 struct tg3_napi *tnapi = &tp->napi[i]; 9684 struct tg3_napi *tnapi = &tp->napi[i];
9675 err = tg3_request_irq(tp, i); 9685 err = tg3_request_irq(tp, i);
9676 if (err) { 9686 if (err) {
9677 for (i--; i >= 0; i--) 9687 for (i--; i >= 0; i--) {
9688 tnapi = &tp->napi[i];
9678 free_irq(tnapi->irq_vec, tnapi); 9689 free_irq(tnapi->irq_vec, tnapi);
9679 break; 9690 }
9691 goto err_out2;
9680 } 9692 }
9681 } 9693 }
9682 9694
9683 if (err)
9684 goto err_out2;
9685
9686 tg3_full_lock(tp, 0); 9695 tg3_full_lock(tp, 0);
9687 9696
9688 err = tg3_init_hw(tp, 1); 9697 err = tg3_init_hw(tp, 1);
@@ -9783,7 +9792,7 @@ static int tg3_close(struct net_device *dev)
9783 struct tg3 *tp = netdev_priv(dev); 9792 struct tg3 *tp = netdev_priv(dev);
9784 9793
9785 tg3_napi_disable(tp); 9794 tg3_napi_disable(tp);
9786 cancel_work_sync(&tp->reset_task); 9795 tg3_reset_task_cancel(tp);
9787 9796
9788 netif_tx_stop_all_queues(dev); 9797 netif_tx_stop_all_queues(dev);
9789 9798
@@ -11520,7 +11529,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11520 break; 11529 break;
11521 } 11530 }
11522 11531
11523 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0); 11532 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11524 dev_kfree_skb(skb); 11533 dev_kfree_skb(skb);
11525 11534
11526 if (tx_idx != tnapi->tx_prod) 11535 if (tx_idx != tnapi->tx_prod)
@@ -14228,12 +14237,30 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
14228 val = tr32(MEMARB_MODE); 14237 val = tr32(MEMARB_MODE);
14229 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); 14238 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14230 14239
14231 if (tg3_flag(tp, PCIX_MODE)) { 14240 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14232 pci_read_config_dword(tp->pdev, 14241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14233 tp->pcix_cap + PCI_X_STATUS, &val); 14242 tg3_flag(tp, 5780_CLASS)) {
14234 tp->pci_fn = val & 0x7; 14243 if (tg3_flag(tp, PCIX_MODE)) {
14235 } else { 14244 pci_read_config_dword(tp->pdev,
14236 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; 14245 tp->pcix_cap + PCI_X_STATUS,
14246 &val);
14247 tp->pci_fn = val & 0x7;
14248 }
14249 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14250 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14251 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14252 NIC_SRAM_CPMUSTAT_SIG) {
14253 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14254 tp->pci_fn = tp->pci_fn ? 1 : 0;
14255 }
14256 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14257 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14258 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14259 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14260 NIC_SRAM_CPMUSTAT_SIG) {
14261 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14262 TG3_CPMU_STATUS_FSHFT_5719;
14263 }
14237 } 14264 }
14238 14265
14239 /* Get eeprom hw config before calling tg3_set_power_state(). 14266 /* Get eeprom hw config before calling tg3_set_power_state().
@@ -15665,7 +15692,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
15665 if (tp->fw) 15692 if (tp->fw)
15666 release_firmware(tp->fw); 15693 release_firmware(tp->fw);
15667 15694
15668 cancel_work_sync(&tp->reset_task); 15695 tg3_reset_task_cancel(tp);
15669 15696
15670 if (tg3_flag(tp, USE_PHYLIB)) { 15697 if (tg3_flag(tp, USE_PHYLIB)) {
15671 tg3_phy_fini(tp); 15698 tg3_phy_fini(tp);
@@ -15699,7 +15726,7 @@ static int tg3_suspend(struct device *device)
15699 if (!netif_running(dev)) 15726 if (!netif_running(dev))
15700 return 0; 15727 return 0;
15701 15728
15702 flush_work_sync(&tp->reset_task); 15729 tg3_reset_task_cancel(tp);
15703 tg3_phy_stop(tp); 15730 tg3_phy_stop(tp);
15704 tg3_netif_stop(tp); 15731 tg3_netif_stop(tp);
15705 15732
@@ -15812,12 +15839,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15812 tg3_netif_stop(tp); 15839 tg3_netif_stop(tp);
15813 15840
15814 del_timer_sync(&tp->timer); 15841 del_timer_sync(&tp->timer);
15815 tg3_flag_clear(tp, RESTART_TIMER);
15816 15842
15817 /* Want to make sure that the reset task doesn't run */ 15843 /* Want to make sure that the reset task doesn't run */
15818 cancel_work_sync(&tp->reset_task); 15844 tg3_reset_task_cancel(tp);
15819 tg3_flag_clear(tp, TX_RECOVERY_PENDING); 15845 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15820 tg3_flag_clear(tp, RESTART_TIMER);
15821 15846
15822 netif_device_detach(netdev); 15847 netif_device_detach(netdev);
15823 15848
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index f32f288134c7..94b4bd049a33 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1095,6 +1095,11 @@
1095#define TG3_CPMU_CLCK_ORIDE 0x00003624 1095#define TG3_CPMU_CLCK_ORIDE 0x00003624
1096#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 1096#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
1097 1097
1098#define TG3_CPMU_STATUS 0x0000362c
1099#define TG3_CPMU_STATUS_FMSK_5717 0x20000000
1100#define TG3_CPMU_STATUS_FMSK_5719 0xc0000000
1101#define TG3_CPMU_STATUS_FSHFT_5719 30
1102
1098#define TG3_CPMU_CLCK_STAT 0x00003630 1103#define TG3_CPMU_CLCK_STAT 0x00003630
1099#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 1104#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
1100#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 1105#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
@@ -2128,6 +2133,10 @@
2128#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008 2133#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008
2129#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010 2134#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010
2130 2135
2136#define NIC_SRAM_CPMU_STATUS 0x00000e00
2137#define NIC_SRAM_CPMUSTAT_SIG 0x0000362c
2138#define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff
2139
2131#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 2140#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000
2132 2141
2133#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 2142#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000
@@ -2344,9 +2353,13 @@
2344#define APE_PER_LOCK_GRANT_DRIVER 0x00001000 2353#define APE_PER_LOCK_GRANT_DRIVER 0x00001000
2345 2354
2346/* APE convenience enumerations. */ 2355/* APE convenience enumerations. */
2347#define TG3_APE_LOCK_GRC 1 2356#define TG3_APE_LOCK_PHY0 0
2348#define TG3_APE_LOCK_MEM 4 2357#define TG3_APE_LOCK_GRC 1
2349#define TG3_APE_LOCK_GPIO 7 2358#define TG3_APE_LOCK_PHY1 2
2359#define TG3_APE_LOCK_PHY2 3
2360#define TG3_APE_LOCK_MEM 4
2361#define TG3_APE_LOCK_PHY3 5
2362#define TG3_APE_LOCK_GPIO 7
2350 2363
2351#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 2364#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
2352 2365
@@ -2866,7 +2879,6 @@ enum TG3_FLAGS {
2866 TG3_FLAG_JUMBO_CAPABLE, 2879 TG3_FLAG_JUMBO_CAPABLE,
2867 TG3_FLAG_CHIP_RESETTING, 2880 TG3_FLAG_CHIP_RESETTING,
2868 TG3_FLAG_INIT_COMPLETE, 2881 TG3_FLAG_INIT_COMPLETE,
2869 TG3_FLAG_RESTART_TIMER,
2870 TG3_FLAG_TSO_BUG, 2882 TG3_FLAG_TSO_BUG,
2871 TG3_FLAG_IS_5788, 2883 TG3_FLAG_IS_5788,
2872 TG3_FLAG_MAX_RXPEND_64, 2884 TG3_FLAG_MAX_RXPEND_64,
@@ -2909,6 +2921,7 @@ enum TG3_FLAGS {
2909 TG3_FLAG_APE_HAS_NCSI, 2921 TG3_FLAG_APE_HAS_NCSI,
2910 TG3_FLAG_5717_PLUS, 2922 TG3_FLAG_5717_PLUS,
2911 TG3_FLAG_4K_FIFO_LIMIT, 2923 TG3_FLAG_4K_FIFO_LIMIT,
2924 TG3_FLAG_RESET_TASK_PENDING,
2912 2925
2913 /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ 2926 /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
2914 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ 2927 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 5d7872ecff52..7f3091e7eb42 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -25,6 +25,7 @@
25#include <linux/if_ether.h> 25#include <linux/if_ether.h>
26#include <linux/ip.h> 26#include <linux/ip.h>
27#include <linux/prefetch.h> 27#include <linux/prefetch.h>
28#include <linux/module.h>
28 29
29#include "bnad.h" 30#include "bnad.h"
30#include "bna.h" 31#include "bna.h"
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 1b0ba8c819f7..56624d303487 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -35,7 +35,7 @@
35#include <asm/mach-types.h> 35#include <asm/mach-types.h>
36 36
37#include <mach/at91rm9200_emac.h> 37#include <mach/at91rm9200_emac.h>
38#include <mach/gpio.h> 38#include <asm/gpio.h>
39#include <mach/board.h> 39#include <mach/board.h>
40 40
41#include "at91_ether.h" 41#include "at91_ether.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index da5a5d9b8aff..90ff1318cc05 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -40,6 +40,7 @@
40#include <net/netevent.h> 40#include <net/netevent.h>
41#include <linux/highmem.h> 41#include <linux/highmem.h>
42#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
43#include <linux/export.h>
43 44
44#include "common.h" 45#include "common.h"
45#include "regs.h" 46#include "regs.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 41540978a173..70fec8b1140f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -35,6 +35,7 @@
35#include <linux/if_vlan.h> 35#include <linux/if_vlan.h>
36#include <linux/jhash.h> 36#include <linux/jhash.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/export.h>
38#include <net/neighbour.h> 39#include <net/neighbour.h>
39#include "common.h" 40#include "common.h"
40#include "t3cdev.h" 41#include "t3cdev.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index a2d323c473f8..6ac77a62f361 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -37,6 +37,9 @@
37#include <linux/if.h> 37#include <linux/if.h>
38#include <linux/if_vlan.h> 38#include <linux/if_vlan.h>
39#include <linux/jhash.h> 39#include <linux/jhash.h>
40#include <linux/module.h>
41#include <linux/debugfs.h>
42#include <linux/seq_file.h>
40#include <net/neighbour.h> 43#include <net/neighbour.h>
41#include "cxgb4.h" 44#include "cxgb4.h"
42#include "l2t.h" 45#include "l2t.h"
@@ -503,10 +506,6 @@ struct l2t_data *t4_init_l2t(void)
503 return d; 506 return d;
504} 507}
505 508
506#include <linux/module.h>
507#include <linux/debugfs.h>
508#include <linux/seq_file.h>
509
510static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) 509static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
511{ 510{
512 struct l2t_entry *l2tab = seq->private; 511 struct l2t_entry *l2tab = seq->private;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ddc16985d0f6..140254c7cba9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -40,6 +40,7 @@
40#include <linux/dma-mapping.h> 40#include <linux/dma-mapping.h>
41#include <linux/jiffies.h> 41#include <linux/jiffies.h>
42#include <linux/prefetch.h> 42#include <linux/prefetch.h>
43#include <linux/export.h>
43#include <net/ipv6.h> 44#include <net/ipv6.h>
44#include <net/tcp.h> 45#include <net/tcp.h>
45#include "cxgb4.h" 46#include "cxgb4.h"
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 6cbb81ccc02e..1f8648f099c7 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -6,7 +6,7 @@ config NET_VENDOR_CIRRUS
6 bool "Cirrus devices" 6 bool "Cirrus devices"
7 default y 7 default y
8 depends on ISA || EISA || MACH_IXDP2351 || ARCH_IXDP2X01 \ 8 depends on ISA || EISA || MACH_IXDP2351 || ARCH_IXDP2X01 \
9 || MACH_MX31ADS || MACH_QQ2440 || (ARM && ARCH_EP93XX) 9 || MACH_MX31ADS || MACH_QQ2440 || (ARM && ARCH_EP93XX) || MAC
10 ---help--- 10 ---help---
11 If you have a network (Ethernet) card belonging to this class, say Y 11 If you have a network (Ethernet) card belonging to this class, say Y
12 and read the Ethernet-HOWTO, available from 12 and read the Ethernet-HOWTO, available from
@@ -47,4 +47,16 @@ config EP93XX_ETH
47 This is a driver for the ethernet hardware included in EP93xx CPUs. 47 This is a driver for the ethernet hardware included in EP93xx CPUs.
48 Say Y if you are building a kernel for EP93xx based devices. 48 Say Y if you are building a kernel for EP93xx based devices.
49 49
50config MAC89x0
51 tristate "Macintosh CS89x0 based ethernet cards"
52 depends on MAC
53 ---help---
54 Support for CS89x0 chipset based Ethernet cards. If you have a
55 Nubus or LC-PDS network (Ethernet) card of this type, say Y and
56 read the Ethernet-HOWTO, available from
57 <http://www.tldp.org/docs.html#howto>.
58
59 To compile this driver as a module, choose M here. This module will
60 be called mac89x0.
61
50endif # NET_VENDOR_CIRRUS 62endif # NET_VENDOR_CIRRUS
diff --git a/drivers/net/ethernet/cirrus/Makefile b/drivers/net/ethernet/cirrus/Makefile
index 14bd77e0cb57..ca245e2b5d98 100644
--- a/drivers/net/ethernet/cirrus/Makefile
+++ b/drivers/net/ethernet/cirrus/Makefile
@@ -4,3 +4,4 @@
4 4
5obj-$(CONFIG_CS89x0) += cs89x0.o 5obj-$(CONFIG_CS89x0) += cs89x0.o
6obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o 6obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
7obj-$(CONFIG_MAC89x0) += mac89x0.o
diff --git a/drivers/net/ethernet/apple/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 83781f316d1f..83781f316d1f 100644
--- a/drivers/net/ethernet/apple/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 824b8e6021f6..2c7b36673dfc 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -318,8 +318,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
318 318
319 if (msecs > 4000) { 319 if (msecs > 4000) {
320 dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); 320 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
321 if (!lancer_chip(adapter)) 321 be_detect_dump_ue(adapter);
322 be_detect_dump_ue(adapter);
323 return -1; 322 return -1;
324 } 323 }
325 324
@@ -1540,7 +1539,14 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1540 1539
1541 req->if_flags_mask = req->if_flags = 1540 req->if_flags_mask = req->if_flags =
1542 cpu_to_le32(BE_IF_FLAGS_MULTICAST); 1541 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1543 req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev)); 1542
1543 /* Reset mcast promisc mode if already set by setting mask
1544 * and not setting flags field
1545 */
1546 req->if_flags_mask |=
1547 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1548
1549 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1544 netdev_for_each_mc_addr(ha, adapter->netdev) 1550 netdev_for_each_mc_addr(ha, adapter->netdev)
1545 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); 1551 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1546 } 1552 }
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index fbc8a915519e..f2c89e3ccabd 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -48,6 +48,8 @@
48/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */ 48/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
49#define SLIPORT_STATUS_OFFSET 0x404 49#define SLIPORT_STATUS_OFFSET 0x404
50#define SLIPORT_CONTROL_OFFSET 0x408 50#define SLIPORT_CONTROL_OFFSET 0x408
51#define SLIPORT_ERROR1_OFFSET 0x40C
52#define SLIPORT_ERROR2_OFFSET 0x410
51 53
52#define SLIPORT_STATUS_ERR_MASK 0x80000000 54#define SLIPORT_STATUS_ERR_MASK 0x80000000
53#define SLIPORT_STATUS_RN_MASK 0x01000000 55#define SLIPORT_STATUS_RN_MASK 0x01000000
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 21804972fa2f..bf266a00c774 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include <linux/prefetch.h> 18#include <linux/prefetch.h>
19#include <linux/module.h>
19#include "be.h" 20#include "be.h"
20#include "be_cmds.h" 21#include "be_cmds.h"
21#include <asm/div64.h> 22#include <asm/div64.h>
@@ -1905,6 +1906,8 @@ loop_continue:
1905 be_rx_stats_update(rxo, rxcp); 1906 be_rx_stats_update(rxo, rxcp);
1906 } 1907 }
1907 1908
1909 be_cq_notify(adapter, rx_cq->id, false, work_done);
1910
1908 /* Refill the queue */ 1911 /* Refill the queue */
1909 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) 1912 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1910 be_post_rx_frags(rxo, GFP_ATOMIC); 1913 be_post_rx_frags(rxo, GFP_ATOMIC);
@@ -1912,10 +1915,8 @@ loop_continue:
1912 /* All consumed */ 1915 /* All consumed */
1913 if (work_done < budget) { 1916 if (work_done < budget) {
1914 napi_complete(napi); 1917 napi_complete(napi);
1915 be_cq_notify(adapter, rx_cq->id, true, work_done); 1918 /* Arm CQ */
1916 } else { 1919 be_cq_notify(adapter, rx_cq->id, true, 0);
1917 /* More to be consumed; continue with interrupts disabled */
1918 be_cq_notify(adapter, rx_cq->id, false, work_done);
1919 } 1920 }
1920 return work_done; 1921 return work_done;
1921} 1922}
@@ -1977,42 +1978,62 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1977 1978
1978void be_detect_dump_ue(struct be_adapter *adapter) 1979void be_detect_dump_ue(struct be_adapter *adapter)
1979{ 1980{
1980 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; 1981 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1982 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1981 u32 i; 1983 u32 i;
1982 1984
1983 pci_read_config_dword(adapter->pdev, 1985 if (lancer_chip(adapter)) {
1984 PCICFG_UE_STATUS_LOW, &ue_status_lo); 1986 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
1985 pci_read_config_dword(adapter->pdev, 1987 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1986 PCICFG_UE_STATUS_HIGH, &ue_status_hi); 1988 sliport_err1 = ioread32(adapter->db +
1987 pci_read_config_dword(adapter->pdev, 1989 SLIPORT_ERROR1_OFFSET);
1988 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask); 1990 sliport_err2 = ioread32(adapter->db +
1989 pci_read_config_dword(adapter->pdev, 1991 SLIPORT_ERROR2_OFFSET);
1990 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask); 1992 }
1993 } else {
1994 pci_read_config_dword(adapter->pdev,
1995 PCICFG_UE_STATUS_LOW, &ue_lo);
1996 pci_read_config_dword(adapter->pdev,
1997 PCICFG_UE_STATUS_HIGH, &ue_hi);
1998 pci_read_config_dword(adapter->pdev,
1999 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2000 pci_read_config_dword(adapter->pdev,
2001 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
1991 2002
1992 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); 2003 ue_lo = (ue_lo & (~ue_lo_mask));
1993 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); 2004 ue_hi = (ue_hi & (~ue_hi_mask));
2005 }
1994 2006
1995 if (ue_status_lo || ue_status_hi) { 2007 if (ue_lo || ue_hi ||
2008 sliport_status & SLIPORT_STATUS_ERR_MASK) {
1996 adapter->ue_detected = true; 2009 adapter->ue_detected = true;
1997 adapter->eeh_err = true; 2010 adapter->eeh_err = true;
1998 dev_err(&adapter->pdev->dev, "UE Detected!!\n"); 2011 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1999 } 2012 }
2000 2013
2001 if (ue_status_lo) { 2014 if (ue_lo) {
2002 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { 2015 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2003 if (ue_status_lo & 1) 2016 if (ue_lo & 1)
2004 dev_err(&adapter->pdev->dev, 2017 dev_err(&adapter->pdev->dev,
2005 "UE: %s bit set\n", ue_status_low_desc[i]); 2018 "UE: %s bit set\n", ue_status_low_desc[i]);
2006 } 2019 }
2007 } 2020 }
2008 if (ue_status_hi) { 2021 if (ue_hi) {
2009 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) { 2022 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2010 if (ue_status_hi & 1) 2023 if (ue_hi & 1)
2011 dev_err(&adapter->pdev->dev, 2024 dev_err(&adapter->pdev->dev,
2012 "UE: %s bit set\n", ue_status_hi_desc[i]); 2025 "UE: %s bit set\n", ue_status_hi_desc[i]);
2013 } 2026 }
2014 } 2027 }
2015 2028
2029 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2030 dev_err(&adapter->pdev->dev,
2031 "sliport status 0x%x\n", sliport_status);
2032 dev_err(&adapter->pdev->dev,
2033 "sliport error1 0x%x\n", sliport_err1);
2034 dev_err(&adapter->pdev->dev,
2035 "sliport error2 0x%x\n", sliport_err2);
2036 }
2016} 2037}
2017 2038
2018static void be_worker(struct work_struct *work) 2039static void be_worker(struct work_struct *work)
@@ -2022,7 +2043,7 @@ static void be_worker(struct work_struct *work)
2022 struct be_rx_obj *rxo; 2043 struct be_rx_obj *rxo;
2023 int i; 2044 int i;
2024 2045
2025 if (!adapter->ue_detected && !lancer_chip(adapter)) 2046 if (!adapter->ue_detected)
2026 be_detect_dump_ue(adapter); 2047 be_detect_dump_ue(adapter);
2027 2048
2028 /* when interrupts are not yet enabled, just reap any pending 2049 /* when interrupts are not yet enabled, just reap any pending
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index bdb348a5ccf6..251b635fe75a 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -22,6 +22,7 @@
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/module.h>
25#include <net/ethoc.h> 26#include <net/ethoc.h>
26 27
27static int buffer_size = 0x8000; /* 32 KBytes */ 28static int buffer_size = 0x8000; /* 32 KBytes */
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 1cf671643d1f..c520cfd3b298 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -7,8 +7,7 @@ config NET_VENDOR_FREESCALE
7 default y 7 default y
8 depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ 8 depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
9 M523x || M527x || M5272 || M528x || M520x || M532x || \ 9 M523x || M527x || M5272 || M528x || M520x || M532x || \
10 ARCH_MXC || ARCH_MXS || \ 10 ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
11 (PPC_MPC52xx && PPC_BESTCOMM)
12 ---help--- 11 ---help---
13 If you have a network (Ethernet) card belonging to this class, say Y 12 If you have a network (Ethernet) card belonging to this class, say Y
14 and read the Ethernet-HOWTO, available from 13 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 46d690a92c0b..b5dc0273a1d1 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/stddef.h> 19#include <linux/stddef.h>
20#include <linux/module.h>
20#include <linux/interrupt.h> 21#include <linux/interrupt.h>
21#include <linux/netdevice.h> 22#include <linux/netdevice.h>
22#include <linux/etherdevice.h> 23#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/i825xx/3c505.c b/drivers/net/ethernet/i825xx/3c505.c
index 40e1a175fceb..ba82a266051d 100644
--- a/drivers/net/ethernet/i825xx/3c505.c
+++ b/drivers/net/ethernet/i825xx/3c505.c
@@ -126,15 +126,13 @@
126 * 126 *
127 *********************************************************/ 127 *********************************************************/
128 128
129#define filename __FILE__
130
131#define timeout_msg "*** timeout at %s:%s (line %d) ***\n" 129#define timeout_msg "*** timeout at %s:%s (line %d) ***\n"
132#define TIMEOUT_MSG(lineno) \ 130#define TIMEOUT_MSG(lineno) \
133 pr_notice(timeout_msg, filename, __func__, (lineno)) 131 pr_notice(timeout_msg, __FILE__, __func__, (lineno))
134 132
135#define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n" 133#define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n"
136#define INVALID_PCB_MSG(len) \ 134#define INVALID_PCB_MSG(len) \
137 pr_notice(invalid_pcb_msg, (len), filename, __func__, __LINE__) 135 pr_notice(invalid_pcb_msg, (len), __FILE__, __func__, __LINE__)
138 136
139#define search_msg "%s: Looking for 3c505 adapter at address %#x..." 137#define search_msg "%s: Looking for 3c505 adapter at address %#x..."
140 138
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 61029dc7fa6f..76213162fbe3 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -5,7 +5,11 @@
5config NET_VENDOR_INTEL 5config NET_VENDOR_INTEL
6 bool "Intel devices" 6 bool "Intel devices"
7 default y 7 default y
8 depends on PCI || PCI_MSI 8 depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
9 ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
10 GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \
11 (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
12 EXPERIMENTAL
9 ---help--- 13 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 14 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 15 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ae17cd1a907f..5a2fdf7a00c8 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2810,6 +2810,10 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2810 2810
2811 e100_get_defaults(nic); 2811 e100_get_defaults(nic);
2812 2812
2813 /* D100 MAC doesn't allow rx of vlan packets with normal MTU */
2814 if (nic->mac < mac_82558_D101_A4)
2815 netdev->features |= NETIF_F_VLAN_CHALLENGED;
2816
2813 /* locks must be initialized before calling hw_reset */ 2817 /* locks must be initialized before calling hw_reset */
2814 spin_lock_init(&nic->cb_lock); 2818 spin_lock_init(&nic->cb_lock);
2815 spin_lock_init(&nic->cmd_lock); 2819 spin_lock_init(&nic->cmd_lock);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 6a17c62cb86f..e2a80a283fd3 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -866,8 +866,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
866 866
867 if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE, 867 if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
868 &hw->adapter->state)) { 868 &hw->adapter->state)) {
869 WARN(1, "e1000e: %s: contention for Phy access\n", 869 e_dbg("contention for Phy access\n");
870 hw->adapter->netdev->name);
871 return -E1000_ERR_PHY; 870 return -E1000_ERR_PHY;
872 } 871 }
873 872
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 4dd9b63273f6..20e93b08e7f3 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -27,6 +27,7 @@
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/module.h>
30#include <linux/pci.h> 31#include <linux/pci.h>
31 32
32#include "e1000.h" 33#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 7edf31efe756..b17d7c20f817 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1687,7 +1687,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
1687 if (ret_val) 1687 if (ret_val)
1688 goto out; 1688 goto out;
1689 1689
1690 is_cm = !(phy_data & I347AT4_PCDC_CABLE_LENGTH_UNIT); 1690 is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
1691 1691
1692 /* Populate the phy structure with cable length in meters */ 1692 /* Populate the phy structure with cable length in meters */
1693 phy->min_cable_length = phy_data / (is_cm ? 100 : 1); 1693 phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 834f044be4c3..f1365fef4ed2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3344,7 +3344,7 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3344static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, 3344static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3345 u32 length) 3345 u32 length)
3346{ 3346{
3347 u32 hicr, i; 3347 u32 hicr, i, bi;
3348 u32 hdr_size = sizeof(struct ixgbe_hic_hdr); 3348 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
3349 u8 buf_len, dword_len; 3349 u8 buf_len, dword_len;
3350 3350
@@ -3398,9 +3398,9 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3398 dword_len = hdr_size >> 2; 3398 dword_len = hdr_size >> 2;
3399 3399
3400 /* first pull in the header so we know the buffer length */ 3400 /* first pull in the header so we know the buffer length */
3401 for (i = 0; i < dword_len; i++) { 3401 for (bi = 0; bi < dword_len; bi++) {
3402 buffer[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i); 3402 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3403 le32_to_cpus(&buffer[i]); 3403 le32_to_cpus(&buffer[bi]);
3404 } 3404 }
3405 3405
3406 /* If there is any thing in data position pull it in */ 3406 /* If there is any thing in data position pull it in */
@@ -3414,12 +3414,14 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
3414 goto out; 3414 goto out;
3415 } 3415 }
3416 3416
3417 /* Calculate length in DWORDs, add one for odd lengths */ 3417 /* Calculate length in DWORDs, add 3 for odd lengths */
3418 dword_len = (buf_len + 1) >> 2; 3418 dword_len = (buf_len + 3) >> 2;
3419 3419
3420 /* Pull in the rest of the buffer (i is where we left off)*/ 3420 /* Pull in the rest of the buffer (bi is where we left off)*/
3421 for (; i < buf_len; i++) 3421 for (; bi <= dword_len; bi++) {
3422 buffer[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i); 3422 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3423 le32_to_cpus(&buffer[bi]);
3424 }
3423 3425
3424out: 3426out:
3425 return ret_val; 3427 return ret_val;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 3631d639d86a..33b93ffb87cb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -561,11 +561,12 @@ static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
561 struct ixgbe_adapter *adapter = netdev_priv(dev); 561 struct ixgbe_adapter *adapter = netdev_priv(dev);
562 struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; 562 struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
563 563
564 ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
565
564 /* No IEEE PFC settings available */ 566 /* No IEEE PFC settings available */
565 if (!my_ets) 567 if (!my_ets)
566 return -EINVAL; 568 return 0;
567 569
568 ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs;
569 ets->cbs = my_ets->cbs; 570 ets->cbs = my_ets->cbs;
570 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); 571 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
571 memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); 572 memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
@@ -621,11 +622,12 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
621 struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; 622 struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
622 int i; 623 int i;
623 624
625 pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs;
626
624 /* No IEEE PFC settings available */ 627 /* No IEEE PFC settings available */
625 if (!my_pfc) 628 if (!my_pfc)
626 return -EINVAL; 629 return 0;
627 630
628 pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs;
629 pfc->pfc_en = my_pfc->pfc_en; 631 pfc->pfc_en = my_pfc->pfc_en;
630 pfc->mbc = my_pfc->mbc; 632 pfc->mbc = my_pfc->mbc;
631 pfc->delay = my_pfc->delay; 633 pfc->delay = my_pfc->delay;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 09b8e88b2999..8ef92d1a6aa1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3345,34 +3345,25 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3345 3345
3346 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3346 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3347 3347
3348 /* reconfigure the hardware */
3349 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
3350#ifdef IXGBE_FCOE 3348#ifdef IXGBE_FCOE
3351 if (adapter->netdev->features & NETIF_F_FCOE_MTU) 3349 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3352 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); 3350 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3353#endif 3351#endif
3352
3353 /* reconfigure the hardware */
3354 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
3354 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, 3355 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3355 DCB_TX_CONFIG); 3356 DCB_TX_CONFIG);
3356 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, 3357 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3357 DCB_RX_CONFIG); 3358 DCB_RX_CONFIG);
3358 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); 3359 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3359 } else { 3360 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
3360 struct net_device *dev = adapter->netdev; 3361 ixgbe_dcb_hw_ets(&adapter->hw,
3361 3362 adapter->ixgbe_ieee_ets,
3362 if (adapter->ixgbe_ieee_ets) { 3363 max_frame);
3363 struct ieee_ets *ets = adapter->ixgbe_ieee_ets; 3364 ixgbe_dcb_hw_pfc_config(&adapter->hw,
3364 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; 3365 adapter->ixgbe_ieee_pfc->pfc_en,
3365 3366 adapter->ixgbe_ieee_ets->prio_tc);
3366 ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
3367 }
3368
3369 if (adapter->ixgbe_ieee_pfc) {
3370 struct ieee_pfc *pfc = adapter->ixgbe_ieee_pfc;
3371 u8 *prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
3372
3373 ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en,
3374 prio_tc);
3375 }
3376 } 3367 }
3377 3368
3378 /* Enable RSS Hash per TC */ 3369 /* Enable RSS Hash per TC */
@@ -6125,7 +6116,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
6125 autoneg = hw->phy.autoneg_advertised; 6116 autoneg = hw->phy.autoneg_advertised;
6126 if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) 6117 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
6127 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); 6118 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
6128 hw->mac.autotry_restart = false;
6129 if (hw->mac.ops.setup_link) 6119 if (hw->mac.ops.setup_link)
6130 hw->mac.ops.setup_link(hw, autoneg, negotiation, true); 6120 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
6131 6121
@@ -7589,13 +7579,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7589 goto err_eeprom; 7579 goto err_eeprom;
7590 } 7580 }
7591 7581
7592 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
7593 if (hw->mac.ops.disable_tx_laser &&
7594 ((hw->phy.multispeed_fiber) ||
7595 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
7596 (hw->mac.type == ixgbe_mac_82599EB))))
7597 hw->mac.ops.disable_tx_laser(hw);
7598
7599 setup_timer(&adapter->service_timer, &ixgbe_service_timer, 7582 setup_timer(&adapter->service_timer, &ixgbe_service_timer,
7600 (unsigned long) adapter); 7583 (unsigned long) adapter);
7601 7584
@@ -7693,6 +7676,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7693 if (err) 7676 if (err)
7694 goto err_register; 7677 goto err_register;
7695 7678
7679 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
7680 if (hw->mac.ops.disable_tx_laser &&
7681 ((hw->phy.multispeed_fiber) ||
7682 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
7683 (hw->mac.type == ixgbe_mac_82599EB))))
7684 hw->mac.ops.disable_tx_laser(hw);
7685
7696 /* carrier off reporting is important to ethtool even BEFORE open */ 7686 /* carrier off reporting is important to ethtool even BEFORE open */
7697 netif_carrier_off(netdev); 7687 netif_carrier_off(netdev);
7698 7688
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index db95731863d7..00fcd39ad666 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -442,12 +442,14 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
442 442
443int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter) 443int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
444{ 444{
445#ifdef CONFIG_PCI_IOV
445 int i; 446 int i;
446 for (i = 0; i < adapter->num_vfs; i++) { 447 for (i = 0; i < adapter->num_vfs; i++) {
447 if (adapter->vfinfo[i].vfdev->dev_flags & 448 if (adapter->vfinfo[i].vfdev->dev_flags &
448 PCI_DEV_FLAGS_ASSIGNED) 449 PCI_DEV_FLAGS_ASSIGNED)
449 return true; 450 return true;
450 } 451 }
452#endif
451 return false; 453 return false;
452} 454}
453 455
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 5a7e1eb33599..df04f1a3857c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -43,9 +43,11 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
43 int vf, struct ifla_vf_info *ivi); 43 int vf, struct ifla_vf_info *ivi);
44void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); 44void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
45void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); 45void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
46int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
47#ifdef CONFIG_PCI_IOV
46void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, 48void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
47 const struct ixgbe_info *ii); 49 const struct ixgbe_info *ii);
48int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter); 50#endif
49 51
50 52
51#endif /* _IXGBE_SRIOV_H_ */ 53#endif /* _IXGBE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 5e92cc2079bd..4c8e19951d57 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -54,7 +54,7 @@ char ixgbevf_driver_name[] = "ixgbevf";
54static const char ixgbevf_driver_string[] = 54static const char ixgbevf_driver_string[] =
55 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 55 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
56 56
57#define DRV_VERSION "2.1.0-k" 57#define DRV_VERSION "2.2.0-k"
58const char ixgbevf_driver_version[] = DRV_VERSION; 58const char ixgbevf_driver_version[] = DRV_VERSION;
59static char ixgbevf_copyright[] = 59static char ixgbevf_copyright[] =
60 "Copyright (c) 2009 - 2010 Intel Corporation."; 60 "Copyright (c) 2009 - 2010 Intel Corporation.";
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cbd026f3bc57..fdc6c394c683 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -366,17 +366,6 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
366 gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); 366 gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
367 } 367 }
368 } else { 368 } else {
369 if (hw->chip_id >= CHIP_ID_YUKON_OPT) {
370 u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2);
371
372 /* enable PHY Reverse Auto-Negotiation */
373 ctrl2 |= 1u << 13;
374
375 /* Write PHY changes (SW-reset must follow) */
376 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2);
377 }
378
379
380 /* disable energy detect */ 369 /* disable energy detect */
381 ctrl &= ~PHY_M_PC_EN_DET_MSK; 370 ctrl &= ~PHY_M_PC_EN_DET_MSK;
382 371
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 116cae334dad..8be20e7ea3d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -34,6 +34,7 @@
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/mm.h> 36#include <linux/mm.h>
37#include <linux/export.h>
37#include <linux/bitmap.h> 38#include <linux/bitmap.h>
38#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
39#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 32f947154c33..45aea9c3ae2c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/workqueue.h> 34#include <linux/workqueue.h>
35#include <linux/module.h>
35 36
36#include "mlx4.h" 37#include "mlx4.h"
37 38
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 23cee7b6af91..78f5a1a0b8c8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -34,6 +34,7 @@
34 34
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/export.h>
37#include <linux/pci.h> 38#include <linux/pci.h>
38#include <linux/errno.h> 39#include <linux/errno.h>
39 40
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index bd8ef9f2fa71..499a5168892a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -35,6 +35,7 @@
35 */ 35 */
36 36
37#include <linux/hardirq.h> 37#include <linux/hardirq.h>
38#include <linux/export.h>
38#include <linux/gfp.h> 39#include <linux/gfp.h>
39 40
40#include <linux/mlx4/cmd.h> 41#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 90f2cd24faac..d901b4267537 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -39,6 +39,7 @@
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
41#include <linux/tcp.h> 41#include <linux/tcp.h>
42#include <linux/moduleparam.h>
42 43
43#include "mlx4_en.h" 44#include "mlx4_en.h"
44 45
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 1ad1f6029af8..24ee96775996 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -33,6 +33,7 @@
33 33
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/export.h>
36#include <linux/mm.h> 37#include <linux/mm.h>
37#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
38 39
@@ -484,7 +485,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
484 485
485 mlx4_mtt_cleanup(dev, &eq->mtt); 486 mlx4_mtt_cleanup(dev, &eq->mtt);
486 for (i = 0; i < npages; ++i) 487 for (i = 0; i < npages; ++i)
487 pci_free_consistent(dev->pdev, PAGE_SIZE, 488 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
488 eq->page_list[i].buf, 489 eq->page_list[i].buf,
489 eq->page_list[i].map); 490 eq->page_list[i].map);
490 491
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index ed452ddfe342..435ca6e49734 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -33,6 +33,7 @@
33 */ 33 */
34 34
35#include <linux/mlx4/cmd.h> 35#include <linux/mlx4/cmd.h>
36#include <linux/module.h>
36#include <linux/cache.h> 37#include <linux/cache.h>
37 38
38#include "fw.h" 39#include "fw.h"
@@ -205,6 +206,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
205#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 206#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
206#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 207#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
207#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 208#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
209#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
210#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
208#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 211#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
209#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 212#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
210#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 213#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
@@ -319,6 +322,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
319 dev_cap->reserved_pds = field >> 4; 322 dev_cap->reserved_pds = field >> 4;
320 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); 323 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
321 dev_cap->max_pds = 1 << (field & 0x3f); 324 dev_cap->max_pds = 1 << (field & 0x3f);
325 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
326 dev_cap->reserved_xrcds = field >> 4;
327 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
328 dev_cap->max_xrcds = 1 << (field & 0x1f);
322 329
323 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); 330 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
324 dev_cap->rdmarc_entry_sz = size; 331 dev_cap->rdmarc_entry_sz = size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 1e8ecc3708e2..bf5ec2286528 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -93,6 +93,8 @@ struct mlx4_dev_cap {
93 int max_mcgs; 93 int max_mcgs;
94 int reserved_pds; 94 int reserved_pds;
95 int max_pds; 95 int max_pds;
96 int reserved_xrcds;
97 int max_xrcds;
96 int qpc_entry_sz; 98 int qpc_entry_sz;
97 int rdmarc_entry_sz; 99 int rdmarc_entry_sz;
98 int altc_entry_sz; 100 int altc_entry_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 73c94fcdfddf..ca6feb55bd94 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/export.h>
35 36
36#include "mlx4.h" 37#include "mlx4.h"
37 38
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f0ee35df4dd7..94bbc85a532d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -96,6 +96,8 @@ MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
96static int log_num_vlan; 96static int log_num_vlan;
97module_param_named(log_num_vlan, log_num_vlan, int, 0444); 97module_param_named(log_num_vlan, log_num_vlan, int, 0444);
98MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 98MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
99/* Log2 max number of VLANs per ETH port (0-7) */
100#define MLX4_LOG_NUM_VLANS 7
99 101
100static int use_prio; 102static int use_prio;
101module_param_named(use_prio, use_prio, bool, 0444); 103module_param_named(use_prio, use_prio, bool, 0444);
@@ -220,6 +222,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
220 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 222 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
221 dev->caps.reserved_uars = dev_cap->reserved_uars; 223 dev->caps.reserved_uars = dev_cap->reserved_uars;
222 dev->caps.reserved_pds = dev_cap->reserved_pds; 224 dev->caps.reserved_pds = dev_cap->reserved_pds;
225 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
226 dev_cap->reserved_xrcds : 0;
227 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
228 dev_cap->max_xrcds : 0;
223 dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; 229 dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
224 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 230 dev->caps.max_msg_sz = dev_cap->max_msg_sz;
225 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 231 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
@@ -230,7 +236,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
230 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 236 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
231 237
232 dev->caps.log_num_macs = log_num_mac; 238 dev->caps.log_num_macs = log_num_mac;
233 dev->caps.log_num_vlans = log_num_vlan; 239 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
234 dev->caps.log_num_prios = use_prio ? 3 : 0; 240 dev->caps.log_num_prios = use_prio ? 3 : 0;
235 241
236 for (i = 1; i <= dev->caps.num_ports; ++i) { 242 for (i = 1; i <= dev->caps.num_ports; ++i) {
@@ -912,11 +918,18 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
912 goto err_kar_unmap; 918 goto err_kar_unmap;
913 } 919 }
914 920
921 err = mlx4_init_xrcd_table(dev);
922 if (err) {
923 mlx4_err(dev, "Failed to initialize "
924 "reliable connection domain table, aborting.\n");
925 goto err_pd_table_free;
926 }
927
915 err = mlx4_init_mr_table(dev); 928 err = mlx4_init_mr_table(dev);
916 if (err) { 929 if (err) {
917 mlx4_err(dev, "Failed to initialize " 930 mlx4_err(dev, "Failed to initialize "
918 "memory region table, aborting.\n"); 931 "memory region table, aborting.\n");
919 goto err_pd_table_free; 932 goto err_xrcd_table_free;
920 } 933 }
921 934
922 err = mlx4_init_eq_table(dev); 935 err = mlx4_init_eq_table(dev);
@@ -998,6 +1011,13 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
998 "ib capabilities (%d). Continuing with " 1011 "ib capabilities (%d). Continuing with "
999 "caps = 0\n", port, err); 1012 "caps = 0\n", port, err);
1000 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 1013 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
1014
1015 err = mlx4_check_ext_port_caps(dev, port);
1016 if (err)
1017 mlx4_warn(dev, "failed to get port %d extended "
1018 "port capabilities support info (%d)."
1019 " Assuming not supported\n", port, err);
1020
1001 err = mlx4_SET_PORT(dev, port); 1021 err = mlx4_SET_PORT(dev, port);
1002 if (err) { 1022 if (err) {
1003 mlx4_err(dev, "Failed to set port %d, aborting\n", 1023 mlx4_err(dev, "Failed to set port %d, aborting\n",
@@ -1033,6 +1053,9 @@ err_eq_table_free:
1033err_mr_table_free: 1053err_mr_table_free:
1034 mlx4_cleanup_mr_table(dev); 1054 mlx4_cleanup_mr_table(dev);
1035 1055
1056err_xrcd_table_free:
1057 mlx4_cleanup_xrcd_table(dev);
1058
1036err_pd_table_free: 1059err_pd_table_free:
1037 mlx4_cleanup_pd_table(dev); 1060 mlx4_cleanup_pd_table(dev);
1038 1061
@@ -1355,6 +1378,7 @@ err_port:
1355 mlx4_cmd_use_polling(dev); 1378 mlx4_cmd_use_polling(dev);
1356 mlx4_cleanup_eq_table(dev); 1379 mlx4_cleanup_eq_table(dev);
1357 mlx4_cleanup_mr_table(dev); 1380 mlx4_cleanup_mr_table(dev);
1381 mlx4_cleanup_xrcd_table(dev);
1358 mlx4_cleanup_pd_table(dev); 1382 mlx4_cleanup_pd_table(dev);
1359 mlx4_cleanup_uar_table(dev); 1383 mlx4_cleanup_uar_table(dev);
1360 1384
@@ -1416,6 +1440,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
1416 mlx4_cmd_use_polling(dev); 1440 mlx4_cmd_use_polling(dev);
1417 mlx4_cleanup_eq_table(dev); 1441 mlx4_cleanup_eq_table(dev);
1418 mlx4_cleanup_mr_table(dev); 1442 mlx4_cleanup_mr_table(dev);
1443 mlx4_cleanup_xrcd_table(dev);
1419 mlx4_cleanup_pd_table(dev); 1444 mlx4_cleanup_pd_table(dev);
1420 1445
1421 iounmap(priv->kar); 1446 iounmap(priv->kar);
@@ -1489,10 +1514,9 @@ static int __init mlx4_verify_params(void)
1489 return -1; 1514 return -1;
1490 } 1515 }
1491 1516
1492 if ((log_num_vlan < 0) || (log_num_vlan > 7)) { 1517 if (log_num_vlan != 0)
1493 pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan); 1518 pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
1494 return -1; 1519 MLX4_LOG_NUM_VLANS);
1495 }
1496 1520
1497 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 1521 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
1498 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 1522 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index cd1784593a3c..978688c31046 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
35#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
36 36
37#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
38#include <linux/export.h>
38 39
39#include "mlx4.h" 40#include "mlx4.h"
40 41
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index a2fcd8402d37..5dfa68ffc11c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -335,6 +335,7 @@ struct mlx4_priv {
335 struct mlx4_cmd cmd; 335 struct mlx4_cmd cmd;
336 336
337 struct mlx4_bitmap pd_bitmap; 337 struct mlx4_bitmap pd_bitmap;
338 struct mlx4_bitmap xrcd_bitmap;
338 struct mlx4_uar_table uar_table; 339 struct mlx4_uar_table uar_table;
339 struct mlx4_mr_table mr_table; 340 struct mlx4_mr_table mr_table;
340 struct mlx4_cq_table cq_table; 341 struct mlx4_cq_table cq_table;
@@ -384,6 +385,7 @@ int mlx4_alloc_eq_table(struct mlx4_dev *dev);
384void mlx4_free_eq_table(struct mlx4_dev *dev); 385void mlx4_free_eq_table(struct mlx4_dev *dev);
385 386
386int mlx4_init_pd_table(struct mlx4_dev *dev); 387int mlx4_init_pd_table(struct mlx4_dev *dev);
388int mlx4_init_xrcd_table(struct mlx4_dev *dev);
387int mlx4_init_uar_table(struct mlx4_dev *dev); 389int mlx4_init_uar_table(struct mlx4_dev *dev);
388int mlx4_init_mr_table(struct mlx4_dev *dev); 390int mlx4_init_mr_table(struct mlx4_dev *dev);
389int mlx4_init_eq_table(struct mlx4_dev *dev); 391int mlx4_init_eq_table(struct mlx4_dev *dev);
@@ -393,6 +395,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev);
393int mlx4_init_mcg_table(struct mlx4_dev *dev); 395int mlx4_init_mcg_table(struct mlx4_dev *dev);
394 396
395void mlx4_cleanup_pd_table(struct mlx4_dev *dev); 397void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
398void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev);
396void mlx4_cleanup_uar_table(struct mlx4_dev *dev); 399void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
397void mlx4_cleanup_mr_table(struct mlx4_dev *dev); 400void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
398void mlx4_cleanup_eq_table(struct mlx4_dev *dev); 401void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
@@ -450,6 +453,7 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
450 453
451int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); 454int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
452int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); 455int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
456int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
453 457
454int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 458int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
455 enum mlx4_protocol prot, enum mlx4_steer_type steer); 459 enum mlx4_protocol prot, enum mlx4_steer_type steer);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index fca66165110e..8fda331c65df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -581,8 +581,9 @@ extern const struct ethtool_ops mlx4_en_ethtool_ops;
581 * printk / logging functions 581 * printk / logging functions
582 */ 582 */
583 583
584__printf(3, 4)
584int en_print(const char *level, const struct mlx4_en_priv *priv, 585int en_print(const char *level, const struct mlx4_en_priv *priv,
585 const char *format, ...) __attribute__ ((format (printf, 3, 4))); 586 const char *format, ...);
586 587
587#define en_dbg(mlevel, priv, format, arg...) \ 588#define en_dbg(mlevel, priv, format, arg...) \
588do { \ 589do { \
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 9c188bdd7f4f..efa3e77355e4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -33,6 +33,7 @@
33 */ 33 */
34 34
35#include <linux/errno.h> 35#include <linux/errno.h>
36#include <linux/export.h>
36#include <linux/slab.h> 37#include <linux/slab.h>
37 38
38#include <linux/mlx4/cmd.h> 39#include <linux/mlx4/cmd.h>
@@ -139,7 +140,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
139 140
140 buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), 141 buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
141 GFP_KERNEL); 142 GFP_KERNEL);
142 buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *), 143 buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
143 GFP_KERNEL); 144 GFP_KERNEL);
144 if (!buddy->bits || !buddy->num_free) 145 if (!buddy->bits || !buddy->num_free)
145 goto err_out; 146 goto err_out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 1286b886dcea..260ed259ce9b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/export.h>
35#include <linux/io-mapping.h> 36#include <linux/io-mapping.h>
36 37
37#include <asm/page.h> 38#include <asm/page.h>
@@ -61,6 +62,24 @@ void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
61} 62}
62EXPORT_SYMBOL_GPL(mlx4_pd_free); 63EXPORT_SYMBOL_GPL(mlx4_pd_free);
63 64
65int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
66{
67 struct mlx4_priv *priv = mlx4_priv(dev);
68
69 *xrcdn = mlx4_bitmap_alloc(&priv->xrcd_bitmap);
70 if (*xrcdn == -1)
71 return -ENOMEM;
72
73 return 0;
74}
75EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
76
77void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
78{
79 mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn);
80}
81EXPORT_SYMBOL_GPL(mlx4_xrcd_free);
82
64int mlx4_init_pd_table(struct mlx4_dev *dev) 83int mlx4_init_pd_table(struct mlx4_dev *dev)
65{ 84{
66 struct mlx4_priv *priv = mlx4_priv(dev); 85 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -74,6 +93,18 @@ void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
74 mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap); 93 mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
75} 94}
76 95
96int mlx4_init_xrcd_table(struct mlx4_dev *dev)
97{
98 struct mlx4_priv *priv = mlx4_priv(dev);
99
100 return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16),
101 (1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0);
102}
103
104void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
105{
106 mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap);
107}
77 108
78int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) 109int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
79{ 110{
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 163a314c148f..d942aea4927b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -32,6 +32,7 @@
32 32
33#include <linux/errno.h> 33#include <linux/errno.h>
34#include <linux/if_ether.h> 34#include <linux/if_ether.h>
35#include <linux/export.h>
35 36
36#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
37 38
@@ -148,22 +149,26 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
148 149
149 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { 150 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
150 err = mlx4_uc_steer_add(dev, port, mac, qpn, 1); 151 err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
151 if (!err) { 152 if (err)
152 entry = kmalloc(sizeof *entry, GFP_KERNEL);
153 if (!entry) {
154 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
155 return -ENOMEM;
156 }
157 entry->mac = mac;
158 err = radix_tree_insert(&info->mac_tree, *qpn, entry);
159 if (err) {
160 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
161 return err;
162 }
163 } else
164 return err; 153 return err;
154
155 entry = kmalloc(sizeof *entry, GFP_KERNEL);
156 if (!entry) {
157 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
158 return -ENOMEM;
159 }
160
161 entry->mac = mac;
162 err = radix_tree_insert(&info->mac_tree, *qpn, entry);
163 if (err) {
164 kfree(entry);
165 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
166 return err;
167 }
165 } 168 }
169
166 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); 170 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
171
167 mutex_lock(&table->mutex); 172 mutex_lock(&table->mutex);
168 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { 173 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
169 if (free < 0 && !table->refs[i]) { 174 if (free < 0 && !table->refs[i]) {
@@ -465,6 +470,48 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
465 return err; 470 return err;
466} 471}
467 472
473int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
474{
475 struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
476 u8 *inbuf, *outbuf;
477 int err, packet_error;
478
479 inmailbox = mlx4_alloc_cmd_mailbox(dev);
480 if (IS_ERR(inmailbox))
481 return PTR_ERR(inmailbox);
482
483 outmailbox = mlx4_alloc_cmd_mailbox(dev);
484 if (IS_ERR(outmailbox)) {
485 mlx4_free_cmd_mailbox(dev, inmailbox);
486 return PTR_ERR(outmailbox);
487 }
488
489 inbuf = inmailbox->buf;
490 outbuf = outmailbox->buf;
491 memset(inbuf, 0, 256);
492 memset(outbuf, 0, 256);
493 inbuf[0] = 1;
494 inbuf[1] = 1;
495 inbuf[2] = 1;
496 inbuf[3] = 1;
497
498 *(__be16 *) (&inbuf[16]) = MLX4_ATTR_EXTENDED_PORT_INFO;
499 *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
500
501 err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
502 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
503
504 packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
505
506 dev->caps.ext_port_cap[port] = (!err && !packet_error) ?
507 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO
508 : 0;
509
510 mlx4_free_cmd_mailbox(dev, inmailbox);
511 mlx4_free_cmd_mailbox(dev, outmailbox);
512 return err;
513}
514
468int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) 515int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
469{ 516{
470 struct mlx4_cmd_mailbox *mailbox; 517 struct mlx4_cmd_mailbox *mailbox;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index ec9350e5f21a..15f870cb2590 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -34,6 +34,7 @@
34 */ 34 */
35 35
36#include <linux/gfp.h> 36#include <linux/gfp.h>
37#include <linux/export.h>
37#include <linux/mlx4/cmd.h> 38#include <linux/mlx4/cmd.h>
38#include <linux/mlx4/qp.h> 39#include <linux/mlx4/qp.h>
39 40
@@ -280,6 +281,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
280 * We reserve 2 extra QPs per port for the special QPs. The 281 * We reserve 2 extra QPs per port for the special QPs. The
281 * block of special QPs must be aligned to a multiple of 8, so 282 * block of special QPs must be aligned to a multiple of 8, so
282 * round up. 283 * round up.
284 *
285 * We also reserve the MSB of the 24-bit QP number to indicate
286 * that a QP is an XRC QP.
283 */ 287 */
284 dev->caps.sqp_start = 288 dev->caps.sqp_start =
285 ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8); 289 ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 3b07b80a0456..9cbf3fce0145 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/mlx4/cmd.h> 34#include <linux/mlx4/cmd.h>
35#include <linux/export.h>
35#include <linux/gfp.h> 36#include <linux/gfp.h>
36 37
37#include "mlx4.h" 38#include "mlx4.h"
@@ -40,20 +41,20 @@
40struct mlx4_srq_context { 41struct mlx4_srq_context {
41 __be32 state_logsize_srqn; 42 __be32 state_logsize_srqn;
42 u8 logstride; 43 u8 logstride;
43 u8 reserved1[3]; 44 u8 reserved1;
44 u8 pg_offset; 45 __be16 xrcd;
45 u8 reserved2[3]; 46 __be32 pg_offset_cqn;
46 u32 reserved3; 47 u32 reserved2;
47 u8 log_page_size; 48 u8 log_page_size;
48 u8 reserved4[2]; 49 u8 reserved3[2];
49 u8 mtt_base_addr_h; 50 u8 mtt_base_addr_h;
50 __be32 mtt_base_addr_l; 51 __be32 mtt_base_addr_l;
51 __be32 pd; 52 __be32 pd;
52 __be16 limit_watermark; 53 __be16 limit_watermark;
53 __be16 wqe_cnt; 54 __be16 wqe_cnt;
54 u16 reserved5; 55 u16 reserved4;
55 __be16 wqe_counter; 56 __be16 wqe_counter;
56 u32 reserved6; 57 u32 reserved5;
57 __be64 db_rec_addr; 58 __be64 db_rec_addr;
58}; 59};
59 60
@@ -109,8 +110,8 @@ static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
109 MLX4_CMD_TIME_CLASS_A); 110 MLX4_CMD_TIME_CLASS_A);
110} 111}
111 112
112int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, 113int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
113 u64 db_rec, struct mlx4_srq *srq) 114 struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
114{ 115{
115 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; 116 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
116 struct mlx4_cmd_mailbox *mailbox; 117 struct mlx4_cmd_mailbox *mailbox;
@@ -148,6 +149,8 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
148 srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | 149 srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
149 srq->srqn); 150 srq->srqn);
150 srq_context->logstride = srq->wqe_shift - 4; 151 srq_context->logstride = srq->wqe_shift - 4;
152 srq_context->xrcd = cpu_to_be16(xrcd);
153 srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff);
151 srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; 154 srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
152 155
153 mtt_addr = mlx4_mtt_addr(dev, mtt); 156 mtt_addr = mlx4_mtt_addr(dev, mtt);
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index 4a6b9fd073b6..eb836f770f50 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -5,7 +5,10 @@
5config NET_VENDOR_NATSEMI 5config NET_VENDOR_NATSEMI
6 bool "National Semi-conductor devices" 6 bool "National Semi-conductor devices"
7 default y 7 default y
8 depends on MCA || MAC || MACH_JAZZ || PCI || XTENSA_PLATFORM_XT2000 8 depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \
9 ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MCA || \
10 MCA_LEGACY || MIPS || PCI || PCMCIA || SUPERH || \
11 XTENSA_PLATFORM_XT2000 || ZORRO
9 ---help--- 12 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 13 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 14 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 671e166b5af1..a83197d757c1 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -55,6 +55,7 @@
55#include <linux/firmware.h> 55#include <linux/firmware.h>
56#include <linux/net_tstamp.h> 56#include <linux/net_tstamp.h>
57#include <linux/prefetch.h> 57#include <linux/prefetch.h>
58#include <linux/module.h>
58#include "vxge-main.h" 59#include "vxge-main.h"
59#include "vxge-reg.h" 60#include "vxge-reg.h"
60 61
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1e37eb98c4e2..1dca57013cb2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1682,6 +1682,7 @@ static void nv_get_hw_stats(struct net_device *dev)
1682 np->estats.tx_pause += readl(base + NvRegTxPause); 1682 np->estats.tx_pause += readl(base + NvRegTxPause);
1683 np->estats.rx_pause += readl(base + NvRegRxPause); 1683 np->estats.rx_pause += readl(base + NvRegRxPause);
1684 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1684 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1685 np->estats.rx_errors_total += np->estats.rx_drop_frame;
1685 } 1686 }
1686 1687
1687 if (np->driver_data & DEV_HAS_STATISTICS_V3) { 1688 if (np->driver_data & DEV_HAS_STATISTICS_V3) {
@@ -1706,11 +1707,14 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1706 nv_get_hw_stats(dev); 1707 nv_get_hw_stats(dev);
1707 1708
1708 /* copy to net_device stats */ 1709 /* copy to net_device stats */
1710 dev->stats.tx_packets = np->estats.tx_packets;
1711 dev->stats.rx_bytes = np->estats.rx_bytes;
1709 dev->stats.tx_bytes = np->estats.tx_bytes; 1712 dev->stats.tx_bytes = np->estats.tx_bytes;
1710 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; 1713 dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1711 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; 1714 dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1712 dev->stats.rx_crc_errors = np->estats.rx_crc_errors; 1715 dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
1713 dev->stats.rx_over_errors = np->estats.rx_over_errors; 1716 dev->stats.rx_over_errors = np->estats.rx_over_errors;
1717 dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
1714 dev->stats.rx_errors = np->estats.rx_errors_total; 1718 dev->stats.rx_errors = np->estats.rx_errors_total;
1715 dev->stats.tx_errors = np->estats.tx_errors_total; 1719 dev->stats.tx_errors = np->estats.tx_errors_total;
1716 } 1720 }
@@ -2099,10 +2103,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2099 2103
2100 /* add fragments to entries count */ 2104 /* add fragments to entries count */
2101 for (i = 0; i < fragments; i++) { 2105 for (i = 0; i < fragments; i++) {
2102 u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2106 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2103 2107
2104 entries += (size >> NV_TX2_TSO_MAX_SHIFT) + 2108 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2105 ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2109 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2106 } 2110 }
2107 2111
2108 spin_lock_irqsave(&np->lock, flags); 2112 spin_lock_irqsave(&np->lock, flags);
@@ -2141,13 +2145,13 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2141 /* setup the fragments */ 2145 /* setup the fragments */
2142 for (i = 0; i < fragments; i++) { 2146 for (i = 0; i < fragments; i++) {
2143 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2147 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2144 u32 size = skb_frag_size(frag); 2148 u32 frag_size = skb_frag_size(frag);
2145 offset = 0; 2149 offset = 0;
2146 2150
2147 do { 2151 do {
2148 prev_tx = put_tx; 2152 prev_tx = put_tx;
2149 prev_tx_ctx = np->put_tx_ctx; 2153 prev_tx_ctx = np->put_tx_ctx;
2150 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2154 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2151 np->put_tx_ctx->dma = skb_frag_dma_map( 2155 np->put_tx_ctx->dma = skb_frag_dma_map(
2152 &np->pci_dev->dev, 2156 &np->pci_dev->dev,
2153 frag, offset, 2157 frag, offset,
@@ -2159,12 +2163,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2159 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2163 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2160 2164
2161 offset += bcnt; 2165 offset += bcnt;
2162 size -= bcnt; 2166 frag_size -= bcnt;
2163 if (unlikely(put_tx++ == np->last_tx.orig)) 2167 if (unlikely(put_tx++ == np->last_tx.orig))
2164 put_tx = np->first_tx.orig; 2168 put_tx = np->first_tx.orig;
2165 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2169 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2166 np->put_tx_ctx = np->first_tx_ctx; 2170 np->put_tx_ctx = np->first_tx_ctx;
2167 } while (size); 2171 } while (frag_size);
2168 } 2172 }
2169 2173
2170 /* set last fragment flag */ 2174 /* set last fragment flag */
@@ -2213,10 +2217,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2213 2217
2214 /* add fragments to entries count */ 2218 /* add fragments to entries count */
2215 for (i = 0; i < fragments; i++) { 2219 for (i = 0; i < fragments; i++) {
2216 u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2220 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2217 2221
2218 entries += (size >> NV_TX2_TSO_MAX_SHIFT) + 2222 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2219 ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2223 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2220 } 2224 }
2221 2225
2222 spin_lock_irqsave(&np->lock, flags); 2226 spin_lock_irqsave(&np->lock, flags);
@@ -2257,13 +2261,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2257 /* setup the fragments */ 2261 /* setup the fragments */
2258 for (i = 0; i < fragments; i++) { 2262 for (i = 0; i < fragments; i++) {
2259 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2263 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2260 u32 size = skb_frag_size(frag); 2264 u32 frag_size = skb_frag_size(frag);
2261 offset = 0; 2265 offset = 0;
2262 2266
2263 do { 2267 do {
2264 prev_tx = put_tx; 2268 prev_tx = put_tx;
2265 prev_tx_ctx = np->put_tx_ctx; 2269 prev_tx_ctx = np->put_tx_ctx;
2266 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 2270 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2267 np->put_tx_ctx->dma = skb_frag_dma_map( 2271 np->put_tx_ctx->dma = skb_frag_dma_map(
2268 &np->pci_dev->dev, 2272 &np->pci_dev->dev,
2269 frag, offset, 2273 frag, offset,
@@ -2276,12 +2280,12 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2276 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); 2280 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2277 2281
2278 offset += bcnt; 2282 offset += bcnt;
2279 size -= bcnt; 2283 frag_size -= bcnt;
2280 if (unlikely(put_tx++ == np->last_tx.ex)) 2284 if (unlikely(put_tx++ == np->last_tx.ex))
2281 put_tx = np->first_tx.ex; 2285 put_tx = np->first_tx.ex;
2282 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) 2286 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2283 np->put_tx_ctx = np->first_tx_ctx; 2287 np->put_tx_ctx = np->first_tx_ctx;
2284 } while (size); 2288 } while (frag_size);
2285 } 2289 }
2286 2290
2287 /* set last fragment flag */ 2291 /* set last fragment flag */
@@ -2374,16 +2378,8 @@ static int nv_tx_done(struct net_device *dev, int limit)
2374 if (np->desc_ver == DESC_VER_1) { 2378 if (np->desc_ver == DESC_VER_1) {
2375 if (flags & NV_TX_LASTPACKET) { 2379 if (flags & NV_TX_LASTPACKET) {
2376 if (flags & NV_TX_ERROR) { 2380 if (flags & NV_TX_ERROR) {
2377 if (flags & NV_TX_UNDERFLOW)
2378 dev->stats.tx_fifo_errors++;
2379 if (flags & NV_TX_CARRIERLOST)
2380 dev->stats.tx_carrier_errors++;
2381 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) 2381 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2382 nv_legacybackoff_reseed(dev); 2382 nv_legacybackoff_reseed(dev);
2383 dev->stats.tx_errors++;
2384 } else {
2385 dev->stats.tx_packets++;
2386 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2387 } 2383 }
2388 dev_kfree_skb_any(np->get_tx_ctx->skb); 2384 dev_kfree_skb_any(np->get_tx_ctx->skb);
2389 np->get_tx_ctx->skb = NULL; 2385 np->get_tx_ctx->skb = NULL;
@@ -2392,16 +2388,8 @@ static int nv_tx_done(struct net_device *dev, int limit)
2392 } else { 2388 } else {
2393 if (flags & NV_TX2_LASTPACKET) { 2389 if (flags & NV_TX2_LASTPACKET) {
2394 if (flags & NV_TX2_ERROR) { 2390 if (flags & NV_TX2_ERROR) {
2395 if (flags & NV_TX2_UNDERFLOW)
2396 dev->stats.tx_fifo_errors++;
2397 if (flags & NV_TX2_CARRIERLOST)
2398 dev->stats.tx_carrier_errors++;
2399 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2391 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2400 nv_legacybackoff_reseed(dev); 2392 nv_legacybackoff_reseed(dev);
2401 dev->stats.tx_errors++;
2402 } else {
2403 dev->stats.tx_packets++;
2404 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2405 } 2393 }
2406 dev_kfree_skb_any(np->get_tx_ctx->skb); 2394 dev_kfree_skb_any(np->get_tx_ctx->skb);
2407 np->get_tx_ctx->skb = NULL; 2395 np->get_tx_ctx->skb = NULL;
@@ -2434,9 +2422,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2434 nv_unmap_txskb(np, np->get_tx_ctx); 2422 nv_unmap_txskb(np, np->get_tx_ctx);
2435 2423
2436 if (flags & NV_TX2_LASTPACKET) { 2424 if (flags & NV_TX2_LASTPACKET) {
2437 if (!(flags & NV_TX2_ERROR)) 2425 if (flags & NV_TX2_ERROR) {
2438 dev->stats.tx_packets++;
2439 else {
2440 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { 2426 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2441 if (np->driver_data & DEV_HAS_GEAR_MODE) 2427 if (np->driver_data & DEV_HAS_GEAR_MODE)
2442 nv_gear_backoff_reseed(dev); 2428 nv_gear_backoff_reseed(dev);
@@ -2636,7 +2622,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2636 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { 2622 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2637 len = nv_getlen(dev, skb->data, len); 2623 len = nv_getlen(dev, skb->data, len);
2638 if (len < 0) { 2624 if (len < 0) {
2639 dev->stats.rx_errors++;
2640 dev_kfree_skb(skb); 2625 dev_kfree_skb(skb);
2641 goto next_pkt; 2626 goto next_pkt;
2642 } 2627 }
@@ -2650,11 +2635,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2650 else { 2635 else {
2651 if (flags & NV_RX_MISSEDFRAME) 2636 if (flags & NV_RX_MISSEDFRAME)
2652 dev->stats.rx_missed_errors++; 2637 dev->stats.rx_missed_errors++;
2653 if (flags & NV_RX_CRCERR)
2654 dev->stats.rx_crc_errors++;
2655 if (flags & NV_RX_OVERFLOW)
2656 dev->stats.rx_over_errors++;
2657 dev->stats.rx_errors++;
2658 dev_kfree_skb(skb); 2638 dev_kfree_skb(skb);
2659 goto next_pkt; 2639 goto next_pkt;
2660 } 2640 }
@@ -2670,7 +2650,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2670 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2650 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2671 len = nv_getlen(dev, skb->data, len); 2651 len = nv_getlen(dev, skb->data, len);
2672 if (len < 0) { 2652 if (len < 0) {
2673 dev->stats.rx_errors++;
2674 dev_kfree_skb(skb); 2653 dev_kfree_skb(skb);
2675 goto next_pkt; 2654 goto next_pkt;
2676 } 2655 }
@@ -2682,11 +2661,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2682 } 2661 }
2683 /* the rest are hard errors */ 2662 /* the rest are hard errors */
2684 else { 2663 else {
2685 if (flags & NV_RX2_CRCERR)
2686 dev->stats.rx_crc_errors++;
2687 if (flags & NV_RX2_OVERFLOW)
2688 dev->stats.rx_over_errors++;
2689 dev->stats.rx_errors++;
2690 dev_kfree_skb(skb); 2664 dev_kfree_skb(skb);
2691 goto next_pkt; 2665 goto next_pkt;
2692 } 2666 }
@@ -2704,7 +2678,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2704 skb->protocol = eth_type_trans(skb, dev); 2678 skb->protocol = eth_type_trans(skb, dev);
2705 napi_gro_receive(&np->napi, skb); 2679 napi_gro_receive(&np->napi, skb);
2706 dev->stats.rx_packets++; 2680 dev->stats.rx_packets++;
2707 dev->stats.rx_bytes += len;
2708next_pkt: 2681next_pkt:
2709 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2682 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2710 np->get_rx.orig = np->first_rx.orig; 2683 np->get_rx.orig = np->first_rx.orig;
@@ -2787,9 +2760,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2787 __vlan_hwaccel_put_tag(skb, vid); 2760 __vlan_hwaccel_put_tag(skb, vid);
2788 } 2761 }
2789 napi_gro_receive(&np->napi, skb); 2762 napi_gro_receive(&np->napi, skb);
2790
2791 dev->stats.rx_packets++; 2763 dev->stats.rx_packets++;
2792 dev->stats.rx_bytes += len;
2793 } else { 2764 } else {
2794 dev_kfree_skb(skb); 2765 dev_kfree_skb(skb);
2795 } 2766 }
@@ -2962,11 +2933,11 @@ static void nv_set_multicast(struct net_device *dev)
2962 struct netdev_hw_addr *ha; 2933 struct netdev_hw_addr *ha;
2963 2934
2964 netdev_for_each_mc_addr(ha, dev) { 2935 netdev_for_each_mc_addr(ha, dev) {
2965 unsigned char *addr = ha->addr; 2936 unsigned char *hw_addr = ha->addr;
2966 u32 a, b; 2937 u32 a, b;
2967 2938
2968 a = le32_to_cpu(*(__le32 *) addr); 2939 a = le32_to_cpu(*(__le32 *) hw_addr);
2969 b = le16_to_cpu(*(__le16 *) (&addr[4])); 2940 b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
2970 alwaysOn[0] &= a; 2941 alwaysOn[0] &= a;
2971 alwaysOff[0] &= ~a; 2942 alwaysOff[0] &= ~a;
2972 alwaysOn[1] &= b; 2943 alwaysOn[1] &= b;
@@ -3398,7 +3369,8 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3398 3369
3399 for (i = 0;; i++) { 3370 for (i = 0;; i++) {
3400 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3371 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3401 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3372 writel(events, base + NvRegMSIXIrqStatus);
3373 netdev_dbg(dev, "tx irq events: %08x\n", events);
3402 if (!(events & np->irqmask)) 3374 if (!(events & np->irqmask))
3403 break; 3375 break;
3404 3376
@@ -3509,7 +3481,8 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3509 3481
3510 for (i = 0;; i++) { 3482 for (i = 0;; i++) {
3511 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3483 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3512 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3484 writel(events, base + NvRegMSIXIrqStatus);
3485 netdev_dbg(dev, "rx irq events: %08x\n", events);
3513 if (!(events & np->irqmask)) 3486 if (!(events & np->irqmask))
3514 break; 3487 break;
3515 3488
@@ -3553,7 +3526,8 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
3553 3526
3554 for (i = 0;; i++) { 3527 for (i = 0;; i++) {
3555 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3528 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3556 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3529 writel(events, base + NvRegMSIXIrqStatus);
3530 netdev_dbg(dev, "irq events: %08x\n", events);
3557 if (!(events & np->irqmask)) 3531 if (!(events & np->irqmask))
3558 break; 3532 break;
3559 3533
@@ -3617,10 +3591,10 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
3617 3591
3618 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3592 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3619 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3593 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3620 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3594 writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3621 } else { 3595 } else {
3622 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 3596 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3623 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3597 writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3624 } 3598 }
3625 pci_push(base); 3599 pci_push(base);
3626 if (!(events & NVREG_IRQ_TIMER)) 3600 if (!(events & NVREG_IRQ_TIMER))
@@ -4566,7 +4540,7 @@ static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *e
4566 struct fe_priv *np = netdev_priv(dev); 4540 struct fe_priv *np = netdev_priv(dev);
4567 4541
4568 /* update stats */ 4542 /* update stats */
4569 nv_do_stats_poll((unsigned long)dev); 4543 nv_get_hw_stats(dev);
4570 4544
4571 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); 4545 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4572} 4546}
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index bc1d946b7971..212f43b308a3 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -9,6 +9,7 @@
9#include <linux/capability.h> 9#include <linux/capability.h>
10#include <linux/dma-mapping.h> 10#include <linux/dma-mapping.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/module.h>
12#include <linux/interrupt.h> 13#include <linux/interrupt.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/netdevice.h> 15#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index b89f3a684aec..48406ca382f1 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -20,6 +20,7 @@
20 20
21#include "pch_gbe.h" 21#include "pch_gbe.h"
22#include "pch_gbe_api.h" 22#include "pch_gbe_api.h"
23#include <linux/module.h>
23 24
24#define DRV_VERSION "1.00" 25#define DRV_VERSION "1.00"
25const char pch_driver_version[] = DRV_VERSION; 26const char pch_driver_version[] = DRV_VERSION;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 5b5d90a47e29..9c075ea2682e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -18,6 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/module.h> /* for __MODULE_STRING */
21#include "pch_gbe.h" 22#include "pch_gbe.h"
22 23
23#define OPTION_UNSET -1 24#define OPTION_UNSET -1
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index 84083ec6e612..0578859a3c73 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -115,16 +115,4 @@ config R8169
115 To compile this driver as a module, choose M here: the module 115 To compile this driver as a module, choose M here: the module
116 will be called r8169. This is recommended. 116 will be called r8169. This is recommended.
117 117
118config SC92031
119 tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
120 depends on PCI && EXPERIMENTAL
121 select CRC32
122 ---help---
123 This is a driver for the Fast Ethernet PCI network cards based on
124 the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you
125 have one of these, say Y here.
126
127 To compile this driver as a module, choose M here: the module
128 will be called sc92031. This is recommended.
129
130endif # NET_VENDOR_REALTEK 118endif # NET_VENDOR_REALTEK
diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile
index e48cfb6ac42d..71b1da30ecb5 100644
--- a/drivers/net/ethernet/realtek/Makefile
+++ b/drivers/net/ethernet/realtek/Makefile
@@ -6,4 +6,3 @@ obj-$(CONFIG_8139CP) += 8139cp.o
6obj-$(CONFIG_8139TOO) += 8139too.o 6obj-$(CONFIG_8139TOO) += 8139too.o
7obj-$(CONFIG_ATP) += atp.o 7obj-$(CONFIG_ATP) += atp.o
8obj-$(CONFIG_R8169) += r8169.o 8obj-$(CONFIG_R8169) += r8169.o
9obj-$(CONFIG_SC92031) += sc92031.o
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index de9afebe1830..d5731f1fe6d6 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2229,13 +2229,15 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2229 2229
2230/* PCI device ID table */ 2230/* PCI device ID table */
2231static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { 2231static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
2232 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), 2232 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2233 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
2233 .driver_data = (unsigned long) &falcon_a1_nic_type}, 2234 .driver_data = (unsigned long) &falcon_a1_nic_type},
2234 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), 2235 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2236 PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
2235 .driver_data = (unsigned long) &falcon_b0_nic_type}, 2237 .driver_data = (unsigned long) &falcon_b0_nic_type},
2236 {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID), 2238 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
2237 .driver_data = (unsigned long) &siena_a0_nic_type}, 2239 .driver_data = (unsigned long) &siena_a0_nic_type},
2238 {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID), 2240 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
2239 .driver_data = (unsigned long) &siena_a0_nic_type}, 2241 .driver_data = (unsigned long) &siena_a0_nic_type},
2240 {0} /* end of list */ 2242 {0} /* end of list */
2241}; 2243};
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 442f4d0c247d..4764793ed234 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -15,10 +15,6 @@
15#include "filter.h" 15#include "filter.h"
16 16
17/* PCI IDs */ 17/* PCI IDs */
18#define EFX_VENDID_SFC 0x1924
19#define FALCON_A_P_DEVID 0x0703
20#define FALCON_A_S_DEVID 0x6703
21#define FALCON_B_P_DEVID 0x0710
22#define BETHPAGE_A_P_DEVID 0x0803 18#define BETHPAGE_A_P_DEVID 0x0803
23#define SIENA_A_P_DEVID 0x0813 19#define SIENA_A_P_DEVID 0x0813
24 20
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 4dd1748a19c6..97b606b92e88 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1426,7 +1426,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
1426 } 1426 }
1427 1427
1428 dev = pci_dev_get(efx->pci_dev); 1428 dev = pci_dev_get(efx->pci_dev);
1429 while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID, 1429 while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
1430 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
1430 dev))) { 1431 dev))) {
1431 if (dev->bus == efx->pci_dev->bus && 1432 if (dev->bus == efx->pci_dev->bus &&
1432 dev->devfn == efx->pci_dev->devfn + 1) { 1433 dev->devfn == efx->pci_dev->devfn + 1) {
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index b9cc846811d6..6cc16b8cc6f4 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -764,7 +764,8 @@ int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
764 764
765 if (board->type) { 765 if (board->type) {
766 netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n", 766 netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
767 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) 767 (efx->pci_dev->subsystem_vendor ==
768 PCI_VENDOR_ID_SOLARFLARE)
768 ? board->type->ref_model : board->type->gen_type, 769 ? board->type->ref_model : board->type->gen_type,
769 'A' + board->major, board->minor); 770 'A' + board->major, board->minor);
770 return 0; 771 return 0;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index adbda182f159..752d521c09b1 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -15,6 +15,7 @@
15#include <linux/tcp.h> 15#include <linux/tcp.h>
16#include <linux/udp.h> 16#include <linux/udp.h>
17#include <linux/prefetch.h> 17#include <linux/prefetch.h>
18#include <linux/moduleparam.h>
18#include <net/ip.h> 19#include <net/ip.h>
19#include <net/checksum.h> 20#include <net/checksum.h>
20#include "net_driver.h" 21#include "net_driver.h"
diff --git a/drivers/net/ethernet/silan/Kconfig b/drivers/net/ethernet/silan/Kconfig
new file mode 100644
index 000000000000..ae1ce170864d
--- /dev/null
+++ b/drivers/net/ethernet/silan/Kconfig
@@ -0,0 +1,33 @@
1#
2# Silan device configuration
3#
4
5config NET_VENDOR_SILAN
6 bool "Silan devices"
7 default y
8 depends on PCI && EXPERIMENTAL
9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from
12 <http://www.tldp.org/docs.html#howto>.
13
14 Note that the answer to this question doesn't directly affect the
15 kernel: saying N will just cause the configurator to skip all
16 the questions about Silan devices. If you say Y, you will be asked for
17 your specific card in the following questions.
18
19if NET_VENDOR_SILAN
20
21config SC92031
22 tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
23 depends on PCI && EXPERIMENTAL
24 select CRC32
25 ---help---
26 This is a driver for the Fast Ethernet PCI network cards based on
27 the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you
28 have one of these, say Y here.
29
30 To compile this driver as a module, choose M here: the module
31 will be called sc92031. This is recommended.
32
33endif # NET_VENDOR_SILAN
diff --git a/drivers/net/ethernet/silan/Makefile b/drivers/net/ethernet/silan/Makefile
new file mode 100644
index 000000000000..4ad3523dcb92
--- /dev/null
+++ b/drivers/net/ethernet/silan/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Silan network device drivers.
3#
4
5obj-$(CONFIG_SC92031) += sc92031.o
diff --git a/drivers/net/ethernet/realtek/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index a284d6440538..a284d6440538 100644
--- a/drivers/net/ethernet/realtek/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 4f15680849ff..edb24b0e337b 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -28,6 +28,7 @@
28#include <linux/dma-mapping.h> 28#include <linux/dma-mapping.h>
29#include <linux/crc32.h> 29#include <linux/crc32.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/module.h>
31#include <asm/unaligned.h> 32#include <asm/unaligned.h>
32#include "smsc9420.h" 33#include "smsc9420.h"
33 34
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index ac6f190743dd..22745d7bf530 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -29,15 +29,6 @@ config STMMAC_DA
29 By default, the DMA arbitration scheme is based on Round-robin 29 By default, the DMA arbitration scheme is based on Round-robin
30 (rx:tx priority is 1:1). 30 (rx:tx priority is 1:1).
31 31
32config STMMAC_DUAL_MAC
33 bool "STMMAC: dual mac support (EXPERIMENTAL)"
34 default n
35 depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
36 ---help---
37 Some ST SoCs (for example the stx7141 and stx7200c2) have two
38 Ethernet Controllers. This option turns on the second Ethernet
39 device on this kind of platforms.
40
41config STMMAC_TIMER 32config STMMAC_TIMER
42 bool "STMMAC Timer optimisation" 33 bool "STMMAC Timer optimisation"
43 default n 34 default n
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index ddb33cfd3543..7bf1e2015784 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1674,6 +1674,9 @@ static int __devinit ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
1674 int result; 1674 int result;
1675 1675
1676 pr_debug("%s: called\n", __func__); 1676 pr_debug("%s: called\n", __func__);
1677
1678 udbg_shutdown_ps3gelic();
1679
1677 result = ps3_open_hv_device(dev); 1680 result = ps3_open_hv_device(dev);
1678 1681
1679 if (result) { 1682 if (result) {
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index d3fadfbc3bcc..a93df6ac1909 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -359,6 +359,12 @@ static inline void *port_priv(struct gelic_port *port)
359 return port->priv; 359 return port->priv;
360} 360}
361 361
362#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
363extern void udbg_shutdown_ps3gelic(void);
364#else
365static inline void udbg_shutdown_ps3gelic(void) {}
366#endif
367
362extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask); 368extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
363/* shared netdev ops */ 369/* shared netdev ops */
364extern void gelic_card_up(struct gelic_card *card); 370extern void gelic_card_up(struct gelic_card *card);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 4d1658e78dee..caf3659e173c 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -716,8 +716,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
716 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 716 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
717 cur_p->phys = dma_map_single(ndev->dev.parent, 717 cur_p->phys = dma_map_single(ndev->dev.parent,
718 skb_frag_address(frag), 718 skb_frag_address(frag),
719 frag_size(frag), DMA_TO_DEVICE); 719 skb_frag_size(frag), DMA_TO_DEVICE);
720 cur_p->len = frag_size(frag); 720 cur_p->len = skb_frag_size(frag);
721 cur_p->app0 = 0; 721 cur_p->app0 = 0;
722 frag++; 722 frag++;
723 } 723 }
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index ec96d910e9a3..f45c85a84261 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -35,6 +35,7 @@
35#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36#include <linux/ptp_classify.h> 36#include <linux/ptp_classify.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/module.h>
38#include <mach/ixp46x_ts.h> 39#include <mach/ixp46x_ts.h>
39#include <mach/npe.h> 40#include <mach/npe.h>
40#include <mach/qmgr.h> 41#include <mach/qmgr.h>
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index a40fab44b9ae..d423d18b4ad6 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -314,7 +314,7 @@ config TOSHIBA_FIR
314 314
315config AU1000_FIR 315config AU1000_FIR
316 tristate "Alchemy Au1000 SIR/FIR" 316 tristate "Alchemy Au1000 SIR/FIR"
317 depends on SOC_AU1000 && IRDA 317 depends on IRDA && MIPS_ALCHEMY
318 318
319config SMC_IRCC_FIR 319config SMC_IRCC_FIR
320 tristate "SMSC IrCC (EXPERIMENTAL)" 320 tristate "SMSC IrCC (EXPERIMENTAL)"
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a3ce3d4561ed..74134970b709 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -192,6 +192,13 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
192 */ 192 */
193 macvlan_broadcast(skb, port, src->dev, 193 macvlan_broadcast(skb, port, src->dev,
194 MACVLAN_MODE_VEPA); 194 MACVLAN_MODE_VEPA);
195 else {
196 /* forward to original port. */
197 vlan = src;
198 ret = macvlan_broadcast_one(skb, vlan, eth, 0);
199 goto out;
200 }
201
195 return RX_HANDLER_PASS; 202 return RX_HANDLER_PASS;
196 } 203 }
197 204
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a4eae750a414..f414ffb5b728 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -14,6 +14,7 @@
14 * 14 *
15 */ 15 */
16#include <linux/phy.h> 16#include <linux/phy.h>
17#include <linux/module.h>
17 18
18#define RTL821x_PHYSR 0x11 19#define RTL821x_PHYSR 0x11
19#define RTL821x_PHYSR_DUPLEX 0x2000 20#define RTL821x_PHYSR_DUPLEX 0x2000
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 3bb131137033..7145714a5ec9 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -88,8 +88,8 @@ static struct rio_dev **rionet_active;
88#define dev_rionet_capable(dev) \ 88#define dev_rionet_capable(dev) \
89 is_rionet_capable(dev->src_ops, dev->dst_ops) 89 is_rionet_capable(dev->src_ops, dev->dst_ops)
90 90
91#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) 91#define RIONET_MAC_MATCH(x) (!memcmp((x), "\00\01\00\01", 4))
92#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) 92#define RIONET_GET_DESTID(x) ((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))
93 93
94static int rionet_rx_clean(struct net_device *ndev) 94static int rionet_rx_clean(struct net_device *ndev)
95{ 95{
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 1e7221951056..d43db32f9478 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -27,6 +27,7 @@
27#include <linux/if_ether.h> 27#include <linux/if_ether.h>
28#include <linux/if_arp.h> 28#include <linux/if_arp.h>
29#include <linux/inetdevice.h> 29#include <linux/inetdevice.h>
30#include <linux/module.h>
30 31
31/* 32/*
32 * The device has a CDC ACM port for modem control (it claims to be 33 * The device has a CDC ACM port for modem control (it claims to be
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7d6082160bcc..fae0fbd8bc88 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1057,7 +1057,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1057 unsigned long flags; 1057 unsigned long flags;
1058 int retval; 1058 int retval;
1059 1059
1060 skb_tx_timestamp(skb); 1060 if (skb)
1061 skb_tx_timestamp(skb);
1061 1062
1062 // some devices want funky USB-level framing, for 1063 // some devices want funky USB-level framing, for
1063 // win32 driver (usually) and/or hardware quirks 1064 // win32 driver (usually) and/or hardware quirks
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5b23767ea817..ef883e97cee0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -17,6 +17,7 @@
17#include <net/dst.h> 17#include <net/dst.h>
18#include <net/xfrm.h> 18#include <net/xfrm.h>
19#include <linux/veth.h> 19#include <linux/veth.h>
20#include <linux/module.h>
20 21
21#define DRV_NAME "veth" 22#define DRV_NAME "veth"
22#define DRV_VERSION "1.0" 23#define DRV_VERSION "1.0"
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 91039ab16728..6ee8410443c4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -925,12 +925,10 @@ static void virtnet_update_status(struct virtnet_info *vi)
925{ 925{
926 u16 v; 926 u16 v;
927 927
928 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) 928 if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
929 return;
930
931 vi->vdev->config->get(vi->vdev,
932 offsetof(struct virtio_net_config, status), 929 offsetof(struct virtio_net_config, status),
933 &v, sizeof(v)); 930 &v) < 0)
931 return;
934 932
935 /* Ignore unknown (future) status bits */ 933 /* Ignore unknown (future) status bits */
936 v &= VIRTIO_NET_S_LINK_UP; 934 v &= VIRTIO_NET_S_LINK_UP;
@@ -1006,11 +1004,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1006 } 1004 }
1007 1005
1008 /* Configuration may specify what MAC to use. Otherwise random. */ 1006 /* Configuration may specify what MAC to use. Otherwise random. */
1009 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { 1007 if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
1010 vdev->config->get(vdev,
1011 offsetof(struct virtio_net_config, mac), 1008 offsetof(struct virtio_net_config, mac),
1012 dev->dev_addr, dev->addr_len); 1009 dev->dev_addr, dev->addr_len) < 0)
1013 } else
1014 random_ether_addr(dev->dev_addr); 1010 random_ether_addr(dev->dev_addr);
1015 1011
1016 /* Set up our device-specific information */ 1012 /* Set up our device-specific information */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b771ebac0f01..d96bfb1ac20b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -24,6 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include <linux/module.h>
27#include <net/ip6_checksum.h> 28#include <net/ip6_checksum.h>
28 29
29#include "vmxnet3_int.h" 30#include "vmxnet3_int.h"
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 727d728649b7..2fea02b35b2d 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -78,6 +78,8 @@
78#include <linux/kernel.h> 78#include <linux/kernel.h>
79#include <linux/slab.h> 79#include <linux/slab.h>
80#include <linux/wimax/i2400m.h> 80#include <linux/wimax/i2400m.h>
81#include <linux/export.h>
82#include <linux/moduleparam.h>
81 83
82 84
83#define D_SUBMODULE control 85#define D_SUBMODULE control
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 9c70b5fa3f51..129ba36bd04d 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -26,6 +26,7 @@
26#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/export.h>
29#include "i2400m.h" 30#include "i2400m.h"
30 31
31 32
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 85dadd5bf4be..7cbd7d231e11 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -158,6 +158,7 @@
158#include <linux/sched.h> 158#include <linux/sched.h>
159#include <linux/slab.h> 159#include <linux/slab.h>
160#include <linux/usb.h> 160#include <linux/usb.h>
161#include <linux/export.h>
161#include "i2400m.h" 162#include "i2400m.h"
162 163
163 164
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 2edd8fe1c1f3..64a110604ad3 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -76,6 +76,7 @@
76#include <linux/slab.h> 76#include <linux/slab.h>
77#include <linux/netdevice.h> 77#include <linux/netdevice.h>
78#include <linux/ethtool.h> 78#include <linux/ethtool.h>
79#include <linux/export.h>
79#include "i2400m.h" 80#include "i2400m.h"
80 81
81 82
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 2f94a872101f..37becfcc98f2 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -149,6 +149,8 @@
149#include <linux/if_arp.h> 149#include <linux/if_arp.h>
150#include <linux/netdevice.h> 150#include <linux/netdevice.h>
151#include <linux/workqueue.h> 151#include <linux/workqueue.h>
152#include <linux/export.h>
153#include <linux/moduleparam.h>
152#include "i2400m.h" 154#include "i2400m.h"
153 155
154 156
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index be428cae28d8..21a9edd6e75d 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -55,6 +55,7 @@
55#include <linux/mmc/sdio_func.h> 55#include <linux/mmc/sdio_func.h>
56#include "i2400m-sdio.h" 56#include "i2400m-sdio.h"
57#include <linux/wimax/i2400m.h> 57#include <linux/wimax/i2400m.h>
58#include <linux/module.h>
58 59
59#define D_SUBMODULE main 60#define D_SUBMODULE main
60#include "sdio-debug-levels.h" 61#include "sdio-debug-levels.h"
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index 4b30ed11d785..4b9ecb20deec 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -245,6 +245,7 @@
245 */ 245 */
246#include <linux/netdevice.h> 246#include <linux/netdevice.h>
247#include <linux/slab.h> 247#include <linux/slab.h>
248#include <linux/export.h>
248#include "i2400m.h" 249#include "i2400m.h"
249 250
250 251
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 9a644d052f1e..2c1b8b687646 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -67,6 +67,7 @@
67#include <linux/wimax/i2400m.h> 67#include <linux/wimax/i2400m.h>
68#include <linux/debugfs.h> 68#include <linux/debugfs.h>
69#include <linux/slab.h> 69#include <linux/slab.h>
70#include <linux/module.h>
70 71
71 72
72#define D_SUBMODULE usb 73#define D_SUBMODULE usb
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index c1c0678b1fb6..98db76196b59 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o
42obj-$(CONFIG_MWL8K) += mwl8k.o 42obj-$(CONFIG_MWL8K) += mwl8k.o
43 43
44obj-$(CONFIG_IWLWIFI) += iwlwifi/ 44obj-$(CONFIG_IWLWIFI) += iwlwifi/
45obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/ 45obj-$(CONFIG_IWLEGACY) += iwlegacy/
46obj-$(CONFIG_RT2X00) += rt2x00/ 46obj-$(CONFIG_RT2X00) += rt2x00/
47 47
48obj-$(CONFIG_P54_COMMON) += p54/ 48obj-$(CONFIG_P54_COMMON) += p54/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 3b752d9fb3cd..f5ce5623da99 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -25,6 +25,7 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/crc32.h> 26#include <linux/crc32.h>
27#include <linux/eeprom_93cx6.h> 27#include <linux/eeprom_93cx6.h>
28#include <linux/module.h>
28#include <net/mac80211.h> 29#include <net/mac80211.h>
29 30
30#include "adm8211.h" 31#include "adm8211.h"
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 39322d4121b7..4045e5ab0555 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -517,7 +517,7 @@ static char *hex2str(void *buf, size_t len)
517 goto exit; 517 goto exit;
518 518
519 while (len--) { 519 while (len--) {
520 obuf = pack_hex_byte(obuf, *ibuf++); 520 obuf = hex_byte_pack(obuf, *ibuf++);
521 *obuf++ = '-'; 521 *obuf++ = '-';
522 } 522 }
523 obuf--; 523 obuf--;
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 073548836413..09602241901b 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -1,6 +1,6 @@
1menuconfig ATH_COMMON 1menuconfig ATH_COMMON
2 tristate "Atheros Wireless Cards" 2 tristate "Atheros Wireless Cards"
3 depends on CFG80211 3 depends on CFG80211 && (!UML || BROKEN)
4 ---help--- 4 ---help---
5 This will enable the support for the Atheros wireless drivers. 5 This will enable the support for the Atheros wireless drivers.
6 ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code, this option 6 ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code, this option
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index fe4bf4da255f..c1d699fd5717 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -152,6 +152,7 @@ struct ath_common {
152 struct ath_cycle_counters cc_survey; 152 struct ath_cycle_counters cc_survey;
153 153
154 struct ath_regulatory regulatory; 154 struct ath_regulatory regulatory;
155 struct ath_regulatory reg_world_copy;
155 const struct ath_ops *ops; 156 const struct ath_ops *ops;
156 const struct ath_bus_ops *bus_ops; 157 const struct ath_bus_ops *bus_ops;
157 158
@@ -173,8 +174,7 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry);
173void ath_hw_cycle_counters_update(struct ath_common *common); 174void ath_hw_cycle_counters_update(struct ath_common *common);
174int32_t ath_hw_get_listen_time(struct ath_common *common); 175int32_t ath_hw_get_listen_time(struct ath_common *common);
175 176
176extern __attribute__((format (printf, 2, 3))) 177extern __printf(2, 3) void ath_printk(const char *level, const char *fmt, ...);
177void ath_printk(const char *level, const char *fmt, ...);
178 178
179#define _ath_printk(level, common, fmt, ...) \ 179#define _ath_printk(level, common, fmt, ...) \
180do { \ 180do { \
@@ -215,6 +215,10 @@ do { \
215 * @ATH_DBG_HWTIMER: hardware timer handling 215 * @ATH_DBG_HWTIMER: hardware timer handling
 216 * @ATH_DBG_BTCOEX: bluetooth coexistence 216 * @ATH_DBG_BTCOEX: bluetooth coexistence
217 * @ATH_DBG_BSTUCK: stuck beacons 217 * @ATH_DBG_BSTUCK: stuck beacons
218 * @ATH_DBG_MCI: Message Coexistence Interface, a private protocol
219 * used exclusively for WLAN-BT coexistence starting from
220 * AR9462.
 221 * @ATH_DBG_DFS: radar detection
218 * @ATH_DBG_ANY: enable all debugging 222 * @ATH_DBG_ANY: enable all debugging
219 * 223 *
220 * The debug level is used to control the amount and type of debugging output 224 * The debug level is used to control the amount and type of debugging output
@@ -241,6 +245,7 @@ enum ATH_DEBUG {
241 ATH_DBG_WMI = 0x00004000, 245 ATH_DBG_WMI = 0x00004000,
242 ATH_DBG_BSTUCK = 0x00008000, 246 ATH_DBG_BSTUCK = 0x00008000,
243 ATH_DBG_MCI = 0x00010000, 247 ATH_DBG_MCI = 0x00010000,
248 ATH_DBG_DFS = 0x00020000,
244 ATH_DBG_ANY = 0xffffffff 249 ATH_DBG_ANY = 0xffffffff
245}; 250};
246 251
@@ -259,7 +264,7 @@ do { \
259 264
260#else 265#else
261 266
262static inline __attribute__((format (printf, 3, 4))) 267static inline __attribute__ ((format (printf, 3, 4)))
263void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask, 268void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
264 const char *fmt, ...) 269 const char *fmt, ...)
265{ 270{
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e5be7e701816..ee7ea572b065 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -166,7 +166,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
166 if (to_platform_device(ah->dev)->id == 0 && 166 if (to_platform_device(ah->dev)->id == 0 &&
167 (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) == 167 (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
168 (BD_WLAN1 | BD_WLAN0)) 168 (BD_WLAN1 | BD_WLAN0))
169 __set_bit(ATH_STAT_2G_DISABLED, ah->status); 169 ah->ah_capabilities.cap_needs_2GHz_ovr = true;
170 else
171 ah->ah_capabilities.cap_needs_2GHz_ovr = false;
170 } 172 }
171 173
172 ret = ath5k_init_ah(ah, &ath_ahb_bus_ops); 174 ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index bea90e6be70e..bf674161a217 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -27,15 +27,21 @@
27 * or reducing sensitivity as necessary. 27 * or reducing sensitivity as necessary.
28 * 28 *
29 * The parameters are: 29 * The parameters are:
30 *
30 * - "noise immunity" 31 * - "noise immunity"
32 *
31 * - "spur immunity" 33 * - "spur immunity"
34 *
32 * - "firstep level" 35 * - "firstep level"
36 *
33 * - "OFDM weak signal detection" 37 * - "OFDM weak signal detection"
38 *
34 * - "CCK weak signal detection" 39 * - "CCK weak signal detection"
35 * 40 *
36 * Basically we look at the amount of ODFM and CCK timing errors we get and then 41 * Basically we look at the amount of ODFM and CCK timing errors we get and then
37 * raise or lower immunity accordingly by setting one or more of these 42 * raise or lower immunity accordingly by setting one or more of these
38 * parameters. 43 * parameters.
44 *
39 * Newer chipsets have PHY error counters in hardware which will generate a MIB 45 * Newer chipsets have PHY error counters in hardware which will generate a MIB
40 * interrupt when they overflow. Older hardware has too enable PHY error frames 46 * interrupt when they overflow. Older hardware has too enable PHY error frames
41 * by setting a RX flag and then count every single PHY error. When a specified 47 * by setting a RX flag and then count every single PHY error. When a specified
@@ -45,11 +51,13 @@
45 */ 51 */
46 52
47 53
48/*** ANI parameter control ***/ 54/***********************\
55* ANI parameter control *
56\***********************/
49 57
50/** 58/**
51 * ath5k_ani_set_noise_immunity_level() - Set noise immunity level 59 * ath5k_ani_set_noise_immunity_level() - Set noise immunity level
52 * 60 * @ah: The &struct ath5k_hw
53 * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL 61 * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
54 */ 62 */
55void 63void
@@ -91,12 +99,11 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
91 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level); 99 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
92} 100}
93 101
94
95/** 102/**
96 * ath5k_ani_set_spur_immunity_level() - Set spur immunity level 103 * ath5k_ani_set_spur_immunity_level() - Set spur immunity level
97 * 104 * @ah: The &struct ath5k_hw
98 * @level: level between 0 and @max_spur_level (the maximum level is dependent 105 * @level: level between 0 and @max_spur_level (the maximum level is dependent
99 * on the chip revision). 106 * on the chip revision).
100 */ 107 */
101void 108void
102ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level) 109ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
@@ -117,10 +124,9 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
117 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level); 124 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
118} 125}
119 126
120
121/** 127/**
122 * ath5k_ani_set_firstep_level() - Set "firstep" level 128 * ath5k_ani_set_firstep_level() - Set "firstep" level
123 * 129 * @ah: The &struct ath5k_hw
124 * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL 130 * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
125 */ 131 */
126void 132void
@@ -140,11 +146,9 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
140 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level); 146 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
141} 147}
142 148
143
144/** 149/**
145 * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal 150 * ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
146 * detection 151 * @ah: The &struct ath5k_hw
147 *
148 * @on: turn on or off 152 * @on: turn on or off
149 */ 153 */
150void 154void
@@ -182,10 +186,9 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
182 on ? "on" : "off"); 186 on ? "on" : "off");
183} 187}
184 188
185
186/** 189/**
187 * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection 190 * ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
188 * 191 * @ah: The &struct ath5k_hw
189 * @on: turn on or off 192 * @on: turn on or off
190 */ 193 */
191void 194void
@@ -200,13 +203,16 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
200} 203}
201 204
202 205
203/*** ANI algorithm ***/ 206/***************\
207* ANI algorithm *
208\***************/
204 209
205/** 210/**
206 * ath5k_ani_raise_immunity() - Increase noise immunity 211 * ath5k_ani_raise_immunity() - Increase noise immunity
207 * 212 * @ah: The &struct ath5k_hw
213 * @as: The &struct ath5k_ani_state
208 * @ofdm_trigger: If this is true we are called because of too many OFDM errors, 214 * @ofdm_trigger: If this is true we are called because of too many OFDM errors,
209 * the algorithm will tune more parameters then. 215 * the algorithm will tune more parameters then.
210 * 216 *
211 * Try to raise noise immunity (=decrease sensitivity) in several steps 217 * Try to raise noise immunity (=decrease sensitivity) in several steps
212 * depending on the average RSSI of the beacons we received. 218 * depending on the average RSSI of the beacons we received.
@@ -290,9 +296,10 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
290 */ 296 */
291} 297}
292 298
293
294/** 299/**
295 * ath5k_ani_lower_immunity() - Decrease noise immunity 300 * ath5k_ani_lower_immunity() - Decrease noise immunity
301 * @ah: The &struct ath5k_hw
302 * @as: The &struct ath5k_ani_state
296 * 303 *
297 * Try to lower noise immunity (=increase sensitivity) in several steps 304 * Try to lower noise immunity (=increase sensitivity) in several steps
298 * depending on the average RSSI of the beacons we received. 305 * depending on the average RSSI of the beacons we received.
@@ -352,9 +359,10 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
352 } 359 }
353} 360}
354 361
355
356/** 362/**
357 * ath5k_hw_ani_get_listen_time() - Update counters and return listening time 363 * ath5k_hw_ani_get_listen_time() - Update counters and return listening time
364 * @ah: The &struct ath5k_hw
365 * @as: The &struct ath5k_ani_state
358 * 366 *
359 * Return an approximation of the time spent "listening" in milliseconds (ms) 367 * Return an approximation of the time spent "listening" in milliseconds (ms)
360 * since the last call of this function. 368 * since the last call of this function.
@@ -379,9 +387,10 @@ ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
379 return listen; 387 return listen;
380} 388}
381 389
382
383/** 390/**
384 * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters 391 * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
392 * @ah: The &struct ath5k_hw
393 * @as: The &struct ath5k_ani_state
385 * 394 *
386 * Clear the PHY error counters as soon as possible, since this might be called 395 * Clear the PHY error counters as soon as possible, since this might be called
387 * from a MIB interrupt and we want to make sure we don't get interrupted again. 396 * from a MIB interrupt and we want to make sure we don't get interrupted again.
@@ -429,14 +438,14 @@ ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
429 return 1; 438 return 1;
430} 439}
431 440
432
433/** 441/**
434 * ath5k_ani_period_restart() - Restart ANI period 442 * ath5k_ani_period_restart() - Restart ANI period
443 * @as: The &struct ath5k_ani_state
435 * 444 *
436 * Just reset counters, so they are clear for the next "ani period". 445 * Just reset counters, so they are clear for the next "ani period".
437 */ 446 */
438static void 447static void
439ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as) 448ath5k_ani_period_restart(struct ath5k_ani_state *as)
440{ 449{
441 /* keep last values for debugging */ 450 /* keep last values for debugging */
442 as->last_ofdm_errors = as->ofdm_errors; 451 as->last_ofdm_errors = as->ofdm_errors;
@@ -448,9 +457,9 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
448 as->listen_time = 0; 457 as->listen_time = 0;
449} 458}
450 459
451
452/** 460/**
453 * ath5k_ani_calibration() - The main ANI calibration function 461 * ath5k_ani_calibration() - The main ANI calibration function
462 * @ah: The &struct ath5k_hw
454 * 463 *
455 * We count OFDM and CCK errors relative to the time where we did not send or 464 * We count OFDM and CCK errors relative to the time where we did not send or
456 * receive ("listen" time) and raise or lower immunity accordingly. 465 * receive ("listen" time) and raise or lower immunity accordingly.
@@ -492,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
492 /* too many PHY errors - we have to raise immunity */ 501 /* too many PHY errors - we have to raise immunity */
493 bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false; 502 bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
494 ath5k_ani_raise_immunity(ah, as, ofdm_flag); 503 ath5k_ani_raise_immunity(ah, as, ofdm_flag);
495 ath5k_ani_period_restart(ah, as); 504 ath5k_ani_period_restart(as);
496 505
497 } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) { 506 } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
498 /* If more than 5 (TODO: why 5?) periods have passed and we got 507 /* If more than 5 (TODO: why 5?) periods have passed and we got
@@ -504,15 +513,18 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
504 if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low) 513 if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
505 ath5k_ani_lower_immunity(ah, as); 514 ath5k_ani_lower_immunity(ah, as);
506 515
507 ath5k_ani_period_restart(ah, as); 516 ath5k_ani_period_restart(as);
508 } 517 }
509} 518}
510 519
511 520
512/*** INTERRUPT HANDLER ***/ 521/*******************\
522* Interrupt handler *
523\*******************/
513 524
514/** 525/**
515 * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters 526 * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
527 * @ah: The &struct ath5k_hw
516 * 528 *
517 * Just read & reset the registers quickly, so they don't generate more 529 * Just read & reset the registers quickly, so they don't generate more
518 * interrupts, save the counters and schedule the tasklet to decide whether 530 * interrupts, save the counters and schedule the tasklet to decide whether
@@ -549,9 +561,11 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
549 tasklet_schedule(&ah->ani_tasklet); 561 tasklet_schedule(&ah->ani_tasklet);
550} 562}
551 563
552
553/** 564/**
554 * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors 565 * ath5k_ani_phy_error_report - Used by older HW to report PHY errors
566 *
567 * @ah: The &struct ath5k_hw
568 * @phyerr: One of enum ath5k_phy_error_code
555 * 569 *
556 * This is used by hardware without PHY error counters to report PHY errors 570 * This is used by hardware without PHY error counters to report PHY errors
557 * on a frame-by-frame basis, instead of the interrupt. 571 * on a frame-by-frame basis, instead of the interrupt.
@@ -574,10 +588,13 @@ ath5k_ani_phy_error_report(struct ath5k_hw *ah,
574} 588}
575 589
576 590
577/*** INIT ***/ 591/****************\
592* Initialization *
593\****************/
578 594
579/** 595/**
580 * ath5k_enable_phy_err_counters() - Enable PHY error counters 596 * ath5k_enable_phy_err_counters() - Enable PHY error counters
597 * @ah: The &struct ath5k_hw
581 * 598 *
582 * Enable PHY error counters for OFDM and CCK timing errors. 599 * Enable PHY error counters for OFDM and CCK timing errors.
583 */ 600 */
@@ -596,9 +613,9 @@ ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
596 ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT); 613 ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
597} 614}
598 615
599
600/** 616/**
601 * ath5k_disable_phy_err_counters() - Disable PHY error counters 617 * ath5k_disable_phy_err_counters() - Disable PHY error counters
618 * @ah: The &struct ath5k_hw
602 * 619 *
603 * Disable PHY error counters for OFDM and CCK timing errors. 620 * Disable PHY error counters for OFDM and CCK timing errors.
604 */ 621 */
@@ -615,10 +632,10 @@ ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
615 ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT); 632 ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
616} 633}
617 634
618
619/** 635/**
620 * ath5k_ani_init() - Initialize ANI 636 * ath5k_ani_init() - Initialize ANI
621 * @mode: Which mode to use (auto, manual high, manual low, off) 637 * @ah: The &struct ath5k_hw
638 * @mode: One of enum ath5k_ani_mode
622 * 639 *
623 * Initialize ANI according to mode. 640 * Initialize ANI according to mode.
624 */ 641 */
@@ -695,10 +712,18 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
695} 712}
696 713
697 714
698/*** DEBUG ***/ 715/**************\
716* Debug output *
717\**************/
699 718
700#ifdef CONFIG_ATH5K_DEBUG 719#ifdef CONFIG_ATH5K_DEBUG
701 720
721/**
722 * ath5k_ani_print_counters() - Print ANI counters
723 * @ah: The &struct ath5k_hw
724 *
725 * Used for debugging ANI
726 */
702void 727void
703ath5k_ani_print_counters(struct ath5k_hw *ah) 728ath5k_ani_print_counters(struct ath5k_hw *ah)
704{ 729{
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
index 7358b6c83c6c..21aa355460bb 100644
--- a/drivers/net/wireless/ath/ath5k/ani.h
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -40,13 +40,13 @@ enum ath5k_phy_error_code;
40 * enum ath5k_ani_mode - mode for ANI / noise sensitivity 40 * enum ath5k_ani_mode - mode for ANI / noise sensitivity
41 * 41 *
42 * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI 42 * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
43 * algorithm after it has been on auto mode. 43 * algorithm after it has been on auto mode.
44 * ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low, 44 * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
45 * maximizing sensitivity. ANI will not run. 45 * maximizing sensitivity. ANI will not run.
46 * ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high, 46 * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
47 * minimizing sensitivity. ANI will not run. 47 * minimizing sensitivity. ANI will not run.
48 * ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the 48 * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
49 * amount of OFDM and CCK frame errors (default). 49 * amount of OFDM and CCK frame errors (default).
50 */ 50 */
51enum ath5k_ani_mode { 51enum ath5k_ani_mode {
52 ATH5K_ANI_MODE_OFF = 0, 52 ATH5K_ANI_MODE_OFF = 0,
@@ -58,8 +58,22 @@ enum ath5k_ani_mode {
58 58
59/** 59/**
60 * struct ath5k_ani_state - ANI state and associated counters 60 * struct ath5k_ani_state - ANI state and associated counters
61 * 61 * @ani_mode: One of enum ath5k_ani_mode
62 * @max_spur_level: the maximum spur level is chip dependent 62 * @noise_imm_level: Noise immunity level
63 * @spur_level: Spur immunity level
64 * @firstep_level: FIRstep level
65 * @ofdm_weak_sig: OFDM weak signal detection state (on/off)
66 * @cck_weak_sig: CCK weak signal detection state (on/off)
67 * @max_spur_level: Max spur immunity level (chip specific)
68 * @listen_time: Listen time
69 * @ofdm_errors: OFDM timing error count
70 * @cck_errors: CCK timing error count
71 * @last_cc: The &struct ath_cycle_counters (for stats)
72 * @last_listen: Listen time from previous run (for stats)
 73 * @last_ofdm_errors: OFDM timing error count from previous run (for stats)
74 * @last_cck_errors: CCK timing error count from previous run (for stats)
75 * @sum_ofdm_errors: Sum of OFDM timing errors (for stats)
76 * @sum_cck_errors: Sum of all CCK timing errors (for stats)
63 */ 77 */
64struct ath5k_ani_state { 78struct ath5k_ani_state {
65 enum ath5k_ani_mode ani_mode; 79 enum ath5k_ani_mode ani_mode;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index fecbcd9a4259..e564e585b221 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -187,10 +187,9 @@
187#define AR5K_TUNE_MAX_TXPOWER 63 187#define AR5K_TUNE_MAX_TXPOWER 63
188#define AR5K_TUNE_DEFAULT_TXPOWER 25 188#define AR5K_TUNE_DEFAULT_TXPOWER 25
189#define AR5K_TUNE_TPC_TXPOWER false 189#define AR5K_TUNE_TPC_TXPOWER false
190#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */ 190#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 60000 /* 60 sec */
191#define ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT 10000 /* 10 sec */
191#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */ 192#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
192#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */
193
194#define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */ 193#define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */
195 194
196#define AR5K_INIT_CARR_SENSE_EN 1 195#define AR5K_INIT_CARR_SENSE_EN 1
@@ -262,16 +261,34 @@
262#define AR5K_AGC_SETTLING_TURBO 37 261#define AR5K_AGC_SETTLING_TURBO 37
263 262
264 263
265/* GENERIC CHIPSET DEFINITIONS */
266 264
267/* MAC Chips */ 265/*****************************\
266* GENERIC CHIPSET DEFINITIONS *
267\*****************************/
268
269/**
270 * enum ath5k_version - MAC Chips
271 * @AR5K_AR5210: AR5210 (Crete)
272 * @AR5K_AR5211: AR5211 (Oahu/Maui)
273 * @AR5K_AR5212: AR5212 (Venice) and newer
274 */
268enum ath5k_version { 275enum ath5k_version {
269 AR5K_AR5210 = 0, 276 AR5K_AR5210 = 0,
270 AR5K_AR5211 = 1, 277 AR5K_AR5211 = 1,
271 AR5K_AR5212 = 2, 278 AR5K_AR5212 = 2,
272}; 279};
273 280
274/* PHY Chips */ 281/**
282 * enum ath5k_radio - PHY Chips
283 * @AR5K_RF5110: RF5110 (Fez)
284 * @AR5K_RF5111: RF5111 (Sombrero)
285 * @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2)
286 * @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite)
287 * @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor)
288 * @AR5K_RF2316: RF2315/2316 (Cobra SoC)
289 * @AR5K_RF2317: RF2317 (Spider SoC)
290 * @AR5K_RF2425: RF2425/2417 (Swan/Nalla)
291 */
275enum ath5k_radio { 292enum ath5k_radio {
276 AR5K_RF5110 = 0, 293 AR5K_RF5110 = 0,
277 AR5K_RF5111 = 1, 294 AR5K_RF5111 = 1,
@@ -303,11 +320,11 @@ enum ath5k_radio {
303#define AR5K_SREV_AR5213A 0x59 /* Hainan */ 320#define AR5K_SREV_AR5213A 0x59 /* Hainan */
304#define AR5K_SREV_AR2413 0x78 /* Griffin lite */ 321#define AR5K_SREV_AR2413 0x78 /* Griffin lite */
305#define AR5K_SREV_AR2414 0x70 /* Griffin */ 322#define AR5K_SREV_AR2414 0x70 /* Griffin */
306#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */ 323#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
307#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */ 324#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
308#define AR5K_SREV_AR5424 0x90 /* Condor */ 325#define AR5K_SREV_AR5424 0x90 /* Condor */
309#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */ 326#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
310#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */ 327#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
311#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */ 328#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */
312#define AR5K_SREV_AR5414 0xa0 /* Eagle */ 329#define AR5K_SREV_AR5414 0xa0 /* Eagle */
313#define AR5K_SREV_AR2415 0xb0 /* Talon */ 330#define AR5K_SREV_AR2415 0xb0 /* Talon */
@@ -344,32 +361,40 @@ enum ath5k_radio {
344 361
345/* TODO add support to mac80211 for vendor-specific rates and modes */ 362/* TODO add support to mac80211 for vendor-specific rates and modes */
346 363
347/* 364/**
365 * DOC: Atheros XR
366 *
348 * Some of this information is based on Documentation from: 367 * Some of this information is based on Documentation from:
349 * 368 *
350 * http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG 369 * http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
351 * 370 *
352 * Modulation for Atheros' eXtended Range - range enhancing extension that is 371 * Atheros' eXtended Range - range enhancing extension is a modulation scheme
353 * supposed to double the distance an Atheros client device can keep a 372 * that is supposed to double the link distance between an Atheros XR-enabled
354 * connection with an Atheros access point. This is achieved by increasing 373 * client device with an Atheros XR-enabled access point. This is achieved
355 * the receiver sensitivity up to, -105dBm, which is about 20dB above what 374 * by increasing the receiver sensitivity up to, -105dBm, which is about 20dB
356 * the 802.11 specifications demand. In addition, new (proprietary) data rates 375 * above what the 802.11 specifications demand. In addition, new (proprietary)
357 * are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s. 376 * data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
358 * 377 *
359 * Please note that can you either use XR or TURBO but you cannot use both, 378 * Please note that can you either use XR or TURBO but you cannot use both,
360 * they are exclusive. 379 * they are exclusive.
361 * 380 *
381 * Also note that we do not plan to support XR mode at least for now. You can
382 * get a mode similar to XR by using 5MHz bwmode.
362 */ 383 */
363#define MODULATION_XR 0x00000200 384
364/* 385
365 * Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a 386/**
366 * throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s 387 * DOC: Atheros SuperAG
367 * signaling rate achieved through the bonding of two 54Mbit/s 802.11g 388 *
368 * channels. To use this feature your Access Point must also support it. 389 * In addition to XR we have another modulation scheme called TURBO mode
390 * that is supposed to provide a throughput transmission speed up to 40Mbit/s
391 * -60Mbit/s at a 108Mbit/s signaling rate achieved through the bonding of two
392 * 54Mbit/s 802.11g channels. To use this feature both ends must support it.
369 * There is also a distinction between "static" and "dynamic" turbo modes: 393 * There is also a distinction between "static" and "dynamic" turbo modes:
370 * 394 *
371 * - Static: is the dumb version: devices set to this mode stick to it until 395 * - Static: is the dumb version: devices set to this mode stick to it until
372 * the mode is turned off. 396 * the mode is turned off.
397 *
373 * - Dynamic: is the intelligent version, the network decides itself if it 398 * - Dynamic: is the intelligent version, the network decides itself if it
374 * is ok to use turbo. As soon as traffic is detected on adjacent channels 399 * is ok to use turbo. As soon as traffic is detected on adjacent channels
375 * (which would get used in turbo mode), or when a non-turbo station joins 400 * (which would get used in turbo mode), or when a non-turbo station joins
@@ -383,24 +408,39 @@ enum ath5k_radio {
383 * 408 *
384 * http://www.pcworld.com/article/id,113428-page,1/article.html 409 * http://www.pcworld.com/article/id,113428-page,1/article.html
385 * 410 *
386 * The channel bonding seems to be driver specific though. In addition to 411 * The channel bonding seems to be driver specific though.
387 * deciding what channels will be used, these "Turbo" modes are accomplished 412 *
388 * by also enabling the following features: 413 * In addition to TURBO modes we also have the following features for even
414 * greater speed-up:
389 * 415 *
390 * - Bursting: allows multiple frames to be sent at once, rather than pausing 416 * - Bursting: allows multiple frames to be sent at once, rather than pausing
391 * after each frame. Bursting is a standards-compliant feature that can be 417 * after each frame. Bursting is a standards-compliant feature that can be
392 * used with any Access Point. 418 * used with any Access Point.
419 *
393 * - Fast frames: increases the amount of information that can be sent per 420 * - Fast frames: increases the amount of information that can be sent per
394 * frame, also resulting in a reduction of transmission overhead. It is a 421 * frame, also resulting in a reduction of transmission overhead. It is a
395 * proprietary feature that needs to be supported by the Access Point. 422 * proprietary feature that needs to be supported by the Access Point.
423 *
396 * - Compression: data frames are compressed in real time using a Lempel Ziv 424 * - Compression: data frames are compressed in real time using a Lempel Ziv
397 * algorithm. This is done transparently. Once this feature is enabled, 425 * algorithm. This is done transparently. Once this feature is enabled,
398 * compression and decompression takes place inside the chipset, without 426 * compression and decompression takes place inside the chipset, without
399 * putting additional load on the host CPU. 427 * putting additional load on the host CPU.
400 * 428 *
429 * As with XR we also don't plan to support SuperAG features for now. You can
430 * get a mode similar to TURBO by using 40MHz bwmode.
401 */ 431 */
402#define MODULATION_TURBO 0x00000080
403 432
433
434/**
435 * enum ath5k_driver_mode - PHY operation mode
436 * @AR5K_MODE_11A: 802.11a
437 * @AR5K_MODE_11B: 802.11b
 438 * @AR5K_MODE_11G: 802.11g
439 * @AR5K_MODE_MAX: Used for boundary checks
440 *
441 * Do not change the order here, we use these as
442 * array indices and it also maps EEPROM structures.
443 */
404enum ath5k_driver_mode { 444enum ath5k_driver_mode {
405 AR5K_MODE_11A = 0, 445 AR5K_MODE_11A = 0,
406 AR5K_MODE_11B = 1, 446 AR5K_MODE_11B = 1,
@@ -408,30 +448,64 @@ enum ath5k_driver_mode {
408 AR5K_MODE_MAX = 3 448 AR5K_MODE_MAX = 3
409}; 449};
410 450
451/**
452 * enum ath5k_ant_mode - Antenna operation mode
453 * @AR5K_ANTMODE_DEFAULT: Default antenna setup
454 * @AR5K_ANTMODE_FIXED_A: Only antenna A is present
455 * @AR5K_ANTMODE_FIXED_B: Only antenna B is present
456 * @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap
457 * @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc
458 * @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc
459 * @AR5K_ANTMODE_DEBUG: Debug mode -A -> Rx, B-> Tx-
460 * @AR5K_ANTMODE_MAX: Used for boundary checks
461 *
462 * For more infos on antenna control check out phy.c
463 */
411enum ath5k_ant_mode { 464enum ath5k_ant_mode {
412 AR5K_ANTMODE_DEFAULT = 0, /* default antenna setup */ 465 AR5K_ANTMODE_DEFAULT = 0,
413 AR5K_ANTMODE_FIXED_A = 1, /* only antenna A is present */ 466 AR5K_ANTMODE_FIXED_A = 1,
414 AR5K_ANTMODE_FIXED_B = 2, /* only antenna B is present */ 467 AR5K_ANTMODE_FIXED_B = 2,
415 AR5K_ANTMODE_SINGLE_AP = 3, /* sta locked on a single ap */ 468 AR5K_ANTMODE_SINGLE_AP = 3,
416 AR5K_ANTMODE_SECTOR_AP = 4, /* AP with tx antenna set on tx desc */ 469 AR5K_ANTMODE_SECTOR_AP = 4,
417 AR5K_ANTMODE_SECTOR_STA = 5, /* STA with tx antenna set on tx desc */ 470 AR5K_ANTMODE_SECTOR_STA = 5,
418 AR5K_ANTMODE_DEBUG = 6, /* Debug mode -A -> Rx, B-> Tx- */ 471 AR5K_ANTMODE_DEBUG = 6,
419 AR5K_ANTMODE_MAX, 472 AR5K_ANTMODE_MAX,
420}; 473};
421 474
475/**
476 * enum ath5k_bw_mode - Bandwidth operation mode
477 * @AR5K_BWMODE_DEFAULT: 20MHz, default operation
478 * @AR5K_BWMODE_5MHZ: Quarter rate
479 * @AR5K_BWMODE_10MHZ: Half rate
480 * @AR5K_BWMODE_40MHZ: Turbo
481 */
422enum ath5k_bw_mode { 482enum ath5k_bw_mode {
423 AR5K_BWMODE_DEFAULT = 0, /* 20MHz, default operation */ 483 AR5K_BWMODE_DEFAULT = 0,
424 AR5K_BWMODE_5MHZ = 1, /* Quarter rate */ 484 AR5K_BWMODE_5MHZ = 1,
425 AR5K_BWMODE_10MHZ = 2, /* Half rate */ 485 AR5K_BWMODE_10MHZ = 2,
426 AR5K_BWMODE_40MHZ = 3 /* Turbo */ 486 AR5K_BWMODE_40MHZ = 3
427}; 487};
428 488
489
490
429/****************\ 491/****************\
430 TX DEFINITIONS 492 TX DEFINITIONS
431\****************/ 493\****************/
432 494
433/* 495/**
434 * TX Status descriptor 496 * struct ath5k_tx_status - TX Status descriptor
497 * @ts_seqnum: Sequence number
498 * @ts_tstamp: Timestamp
499 * @ts_status: Status code
500 * @ts_final_idx: Final transmission series index
501 * @ts_final_retry: Final retry count
502 * @ts_rssi: RSSI for received ACK
503 * @ts_shortretry: Short retry count
504 * @ts_virtcol: Virtual collision count
505 * @ts_antenna: Antenna used
506 *
507 * TX status descriptor gets filled by the hw
508 * on each transmission attempt.
435 */ 509 */
436struct ath5k_tx_status { 510struct ath5k_tx_status {
437 u16 ts_seqnum; 511 u16 ts_seqnum;
@@ -454,7 +528,6 @@ struct ath5k_tx_status {
454 * enum ath5k_tx_queue - Queue types used to classify tx queues. 528 * enum ath5k_tx_queue - Queue types used to classify tx queues.
455 * @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue 529 * @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue
456 * @AR5K_TX_QUEUE_DATA: A normal data queue 530 * @AR5K_TX_QUEUE_DATA: A normal data queue
457 * @AR5K_TX_QUEUE_XR_DATA: An XR-data queue
458 * @AR5K_TX_QUEUE_BEACON: The beacon queue 531 * @AR5K_TX_QUEUE_BEACON: The beacon queue
459 * @AR5K_TX_QUEUE_CAB: The after-beacon queue 532 * @AR5K_TX_QUEUE_CAB: The after-beacon queue
460 * @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue 533 * @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue
@@ -462,7 +535,6 @@ struct ath5k_tx_status {
462enum ath5k_tx_queue { 535enum ath5k_tx_queue {
463 AR5K_TX_QUEUE_INACTIVE = 0, 536 AR5K_TX_QUEUE_INACTIVE = 0,
464 AR5K_TX_QUEUE_DATA, 537 AR5K_TX_QUEUE_DATA,
465 AR5K_TX_QUEUE_XR_DATA,
466 AR5K_TX_QUEUE_BEACON, 538 AR5K_TX_QUEUE_BEACON,
467 AR5K_TX_QUEUE_CAB, 539 AR5K_TX_QUEUE_CAB,
468 AR5K_TX_QUEUE_UAPSD, 540 AR5K_TX_QUEUE_UAPSD,
@@ -471,36 +543,46 @@ enum ath5k_tx_queue {
471#define AR5K_NUM_TX_QUEUES 10 543#define AR5K_NUM_TX_QUEUES 10
472#define AR5K_NUM_TX_QUEUES_NOQCU 2 544#define AR5K_NUM_TX_QUEUES_NOQCU 2
473 545
474/* 546/**
475 * Queue syb-types to classify normal data queues. 547 * enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues
548 * @AR5K_WME_AC_BK: Background traffic
549 * @AR5K_WME_AC_BE: Best-effort (normal) traffic
550 * @AR5K_WME_AC_VI: Video traffic
551 * @AR5K_WME_AC_VO: Voice traffic
552 *
476 * These are the 4 Access Categories as defined in 553 * These are the 4 Access Categories as defined in
477 * WME spec. 0 is the lowest priority and 4 is the 554 * WME spec. 0 is the lowest priority and 4 is the
478 * highest. Normal data that hasn't been classified 555 * highest. Normal data that hasn't been classified
479 * goes to the Best Effort AC. 556 * goes to the Best Effort AC.
480 */ 557 */
481enum ath5k_tx_queue_subtype { 558enum ath5k_tx_queue_subtype {
482 AR5K_WME_AC_BK = 0, /*Background traffic*/ 559 AR5K_WME_AC_BK = 0,
483 AR5K_WME_AC_BE, /*Best-effort (normal) traffic*/ 560 AR5K_WME_AC_BE,
484 AR5K_WME_AC_VI, /*Video traffic*/ 561 AR5K_WME_AC_VI,
485 AR5K_WME_AC_VO, /*Voice traffic*/ 562 AR5K_WME_AC_VO,
486}; 563};
487 564
488/* 565/**
489 * Queue ID numbers as returned by the hw functions, each number 566 * enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions
490 * represents a hw queue. If hw does not support hw queues 567 * @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available)
491 * (eg 5210) all data goes in one queue. These match 568 * @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available)
492 * d80211 definitions (net80211/MadWiFi don't use them). 569 * @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index
570 * @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index
571 * @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue
572 * @AR5K_TX_QUEUE_ID_BEACON: Beacon queue
573 * @AR5K_TX_QUEUE_ID_UAPSD: Urgent Automatic Power Save Delivery,
574 *
575 * Each number represents a hw queue. If hw does not support hw queues
576 * (eg 5210) all data goes in one queue.
493 */ 577 */
494enum ath5k_tx_queue_id { 578enum ath5k_tx_queue_id {
495 AR5K_TX_QUEUE_ID_NOQCU_DATA = 0, 579 AR5K_TX_QUEUE_ID_NOQCU_DATA = 0,
496 AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1, 580 AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1,
497 AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/ 581 AR5K_TX_QUEUE_ID_DATA_MIN = 0,
498 AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/ 582 AR5K_TX_QUEUE_ID_DATA_MAX = 3,
499 AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/ 583 AR5K_TX_QUEUE_ID_UAPSD = 7,
500 AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/ 584 AR5K_TX_QUEUE_ID_CAB = 8,
501 AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/ 585 AR5K_TX_QUEUE_ID_BEACON = 9,
502 AR5K_TX_QUEUE_ID_UAPSD = 8,
503 AR5K_TX_QUEUE_ID_XR_DATA = 9,
504}; 586};
505 587
506/* 588/*
@@ -521,46 +603,70 @@ enum ath5k_tx_queue_id {
521#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */ 603#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */
522#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/ 604#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/
523 605
524/* 606/**
525 * Data transmit queue state. One of these exists for each 607 * struct ath5k_txq - Transmit queue state
526 * hardware transmit queue. Packets sent to us from above 608 * @qnum: Hardware q number
527 * are assigned to queues based on their priority. Not all 609 * @link: Link ptr in last TX desc
528 * devices support a complete set of hardware transmit queues. 610 * @q: Transmit queue (&struct list_head)
529 * For those devices the array sc_ac2q will map multiple 611 * @lock: Lock on q and link
530 * priorities to fewer hardware queues (typically all to one 612 * @setup: Is the queue configured
531 * hardware queue). 613 * @txq_len:Number of queued buffers
614 * @txq_max: Max allowed num of queued buffers
615 * @txq_poll_mark: Used to check if queue got stuck
616 * @txq_stuck: Queue stuck counter
617 *
618 * One of these exists for each hardware transmit queue.
619 * Packets sent to us from above are assigned to queues based
620 * on their priority. Not all devices support a complete set
621 * of hardware transmit queues. For those devices the array
622 * sc_ac2q will map multiple priorities to fewer hardware queues
623 * (typically all to one hardware queue).
532 */ 624 */
533struct ath5k_txq { 625struct ath5k_txq {
534 unsigned int qnum; /* hardware q number */ 626 unsigned int qnum;
535 u32 *link; /* link ptr in last TX desc */ 627 u32 *link;
536 struct list_head q; /* transmit queue */ 628 struct list_head q;
537 spinlock_t lock; /* lock on q and link */ 629 spinlock_t lock;
538 bool setup; 630 bool setup;
539 int txq_len; /* number of queued buffers */ 631 int txq_len;
540 int txq_max; /* max allowed num of queued buffers */ 632 int txq_max;
541 bool txq_poll_mark; 633 bool txq_poll_mark;
542 unsigned int txq_stuck; /* informational counter */ 634 unsigned int txq_stuck;
543}; 635};
544 636
545/* 637/**
546 * A struct to hold tx queue's parameters 638 * struct ath5k_txq_info - A struct to hold TX queue's parameters
639 * @tqi_type: One of enum ath5k_tx_queue
640 * @tqi_subtype: One of enum ath5k_tx_queue_subtype
641 * @tqi_flags: TX queue flags (see above)
642 * @tqi_aifs: Arbitrated Inter-frame Space
643 * @tqi_cw_min: Minimum Contention Window
644 * @tqi_cw_max: Maximum Contention Window
645 * @tqi_cbr_period: Constant bit rate period
646 * @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled
547 */ 647 */
548struct ath5k_txq_info { 648struct ath5k_txq_info {
549 enum ath5k_tx_queue tqi_type; 649 enum ath5k_tx_queue tqi_type;
550 enum ath5k_tx_queue_subtype tqi_subtype; 650 enum ath5k_tx_queue_subtype tqi_subtype;
551 u16 tqi_flags; /* Tx queue flags (see above) */ 651 u16 tqi_flags;
552 u8 tqi_aifs; /* Arbitrated Interframe Space */ 652 u8 tqi_aifs;
553 u16 tqi_cw_min; /* Minimum Contention Window */ 653 u16 tqi_cw_min;
554 u16 tqi_cw_max; /* Maximum Contention Window */ 654 u16 tqi_cw_max;
555 u32 tqi_cbr_period; /* Constant bit rate period */ 655 u32 tqi_cbr_period;
556 u32 tqi_cbr_overflow_limit; 656 u32 tqi_cbr_overflow_limit;
557 u32 tqi_burst_time; 657 u32 tqi_burst_time;
558 u32 tqi_ready_time; /* Time queue waits after an event */ 658 u32 tqi_ready_time;
559}; 659};
560 660
561/* 661/**
562 * Transmit packet types. 662 * enum ath5k_pkt_type - Transmit packet types
563 * used on tx control descriptor 663 * @AR5K_PKT_TYPE_NORMAL: Normal data
664 * @AR5K_PKT_TYPE_ATIM: ATIM
665 * @AR5K_PKT_TYPE_PSPOLL: PS-Poll
666 * @AR5K_PKT_TYPE_BEACON: Beacon
667 * @AR5K_PKT_TYPE_PROBE_RESP: Probe response
668 * @AR5K_PKT_TYPE_PIFS: PIFS
669 * Used on tx control descriptor
564 */ 670 */
565enum ath5k_pkt_type { 671enum ath5k_pkt_type {
566 AR5K_PKT_TYPE_NORMAL = 0, 672 AR5K_PKT_TYPE_NORMAL = 0,
@@ -583,27 +689,23 @@ enum ath5k_pkt_type {
583 (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \ 689 (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \
584) 690)
585 691
586/*
587 * DMA size definitions (2^(n+2))
588 */
589enum ath5k_dmasize {
590 AR5K_DMASIZE_4B = 0,
591 AR5K_DMASIZE_8B,
592 AR5K_DMASIZE_16B,
593 AR5K_DMASIZE_32B,
594 AR5K_DMASIZE_64B,
595 AR5K_DMASIZE_128B,
596 AR5K_DMASIZE_256B,
597 AR5K_DMASIZE_512B
598};
599 692
600 693
601/****************\ 694/****************\
602 RX DEFINITIONS 695 RX DEFINITIONS
603\****************/ 696\****************/
604 697
605/* 698/**
606 * RX Status descriptor 699 * struct ath5k_rx_status - RX Status descriptor
700 * @rs_datalen: Data length
701 * @rs_tstamp: Timestamp
702 * @rs_status: Status code
703 * @rs_phyerr: PHY error mask
704 * @rs_rssi: RSSI in 0.5dbm units
705 * @rs_keyix: Index to the key used for decrypting
706 * @rs_rate: Rate used to decode the frame
707 * @rs_antenna: Antenna used to receive the frame
708 * @rs_more: Indicates this is a frame fragment (Fast frames)
607 */ 709 */
608struct ath5k_rx_status { 710struct ath5k_rx_status {
609 u16 rs_datalen; 711 u16 rs_datalen;
@@ -645,10 +747,18 @@ struct ath5k_rx_status {
645#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10) 747#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
646 748
647 749
750
648/*******************************\ 751/*******************************\
649 GAIN OPTIMIZATION DEFINITIONS 752 GAIN OPTIMIZATION DEFINITIONS
650\*******************************/ 753\*******************************/
651 754
755/**
756 * enum ath5k_rfgain - RF Gain optimization engine state
757 * @AR5K_RFGAIN_INACTIVE: Engine disabled
758 * @AR5K_RFGAIN_ACTIVE: Probe active
759 * @AR5K_RFGAIN_READ_REQUESTED: Probe requested
760 * @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change
761 */
652enum ath5k_rfgain { 762enum ath5k_rfgain {
653 AR5K_RFGAIN_INACTIVE = 0, 763 AR5K_RFGAIN_INACTIVE = 0,
654 AR5K_RFGAIN_ACTIVE, 764 AR5K_RFGAIN_ACTIVE,
@@ -656,6 +766,16 @@ enum ath5k_rfgain {
656 AR5K_RFGAIN_NEED_CHANGE, 766 AR5K_RFGAIN_NEED_CHANGE,
657}; 767};
658 768
769/**
770 * struct ath5k_gain - RF Gain optimization engine state data
771 * @g_step_idx: Current step index
772 * @g_current: Current gain
773 * @g_target: Target gain
774 * @g_low: Low gain boundary
775 * @g_high: High gain boundary
776 * @g_f_corr: Gain_F correction
777 * @g_state: One of enum ath5k_rfgain
778 */
659struct ath5k_gain { 779struct ath5k_gain {
660 u8 g_step_idx; 780 u8 g_step_idx;
661 u8 g_current; 781 u8 g_current;
@@ -666,6 +786,8 @@ struct ath5k_gain {
666 u8 g_state; 786 u8 g_state;
667}; 787};
668 788
789
790
669/********************\ 791/********************\
670 COMMON DEFINITIONS 792 COMMON DEFINITIONS
671\********************/ 793\********************/
@@ -674,9 +796,14 @@ struct ath5k_gain {
674#define AR5K_SLOT_TIME_20 880 796#define AR5K_SLOT_TIME_20 880
675#define AR5K_SLOT_TIME_MAX 0xffff 797#define AR5K_SLOT_TIME_MAX 0xffff
676 798
677/* 799/**
678 * The following structure is used to map 2GHz channels to 800 * struct ath5k_athchan_2ghz - 2GHz to 5GHZ map for RF5111
679 * 5GHz Atheros channels. 801 * @a2_flags: Channel flags (internal)
802 * @a2_athchan: HW channel number (internal)
803 *
804 * This structure is used to map 2GHz channels to
805 * 5GHz Atheros channels on 2111 frequency converter
806 * that comes together with RF5111
680 * TODO: Clean up 807 * TODO: Clean up
681 */ 808 */
682struct ath5k_athchan_2ghz { 809struct ath5k_athchan_2ghz {
@@ -684,36 +811,80 @@ struct ath5k_athchan_2ghz {
684 u16 a2_athchan; 811 u16 a2_athchan;
685}; 812};
686 813
814/**
815 * enum ath5k_dmasize - DMA size definitions (2^(n+2))
816 * @AR5K_DMASIZE_4B: 4Bytes
817 * @AR5K_DMASIZE_8B: 8Bytes
818 * @AR5K_DMASIZE_16B: 16Bytes
819 * @AR5K_DMASIZE_32B: 32Bytes
820 * @AR5K_DMASIZE_64B: 64Bytes (Default)
821 * @AR5K_DMASIZE_128B: 128Bytes
822 * @AR5K_DMASIZE_256B: 256Bytes
823 * @AR5K_DMASIZE_512B: 512Bytes
824 *
825 * These are used to set DMA burst size on hw
826 *
827 * Note: Some platforms can't handle more than 4Bytes
828 * be careful on embedded boards.
829 */
830enum ath5k_dmasize {
831 AR5K_DMASIZE_4B = 0,
832 AR5K_DMASIZE_8B,
833 AR5K_DMASIZE_16B,
834 AR5K_DMASIZE_32B,
835 AR5K_DMASIZE_64B,
836 AR5K_DMASIZE_128B,
837 AR5K_DMASIZE_256B,
838 AR5K_DMASIZE_512B
839};
840
841
687 842
688/******************\ 843/******************\
689 RATE DEFINITIONS 844 RATE DEFINITIONS
690\******************/ 845\******************/
691 846
692/** 847/**
848 * DOC: Rate codes
849 *
693 * Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32. 850 * Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32.
694 * 851 *
695 * The rate code is used to get the RX rate or set the TX rate on the 852 * The rate code is used to get the RX rate or set the TX rate on the
696 * hardware descriptors. It is also used for internal modulation control 853 * hardware descriptors. It is also used for internal modulation control
697 * and settings. 854 * and settings.
698 * 855 *
699 * This is the hardware rate map we are aware of: 856 * This is the hardware rate map we are aware of (html unfriendly):
700 *
701 * rate_code 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08
702 * rate_kbps 3000 1000 ? ? ? 2000 500 48000
703 *
704 * rate_code 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x10
705 * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ?
706 * 857 *
707 * rate_code 17 18 19 20 21 22 23 24 858 * Rate code Rate (Kbps)
708 * rate_kbps ? ? ? ? ? ? ? 11000 859 * --------- -----------
860 * 0x01 3000 (XR)
861 * 0x02 1000 (XR)
862 * 0x03 250 (XR)
863 * 0x04 - 05 -Reserved-
864 * 0x06 2000 (XR)
865 * 0x07 500 (XR)
866 * 0x08 48000 (OFDM)
867 * 0x09 24000 (OFDM)
868 * 0x0A 12000 (OFDM)
869 * 0x0B 6000 (OFDM)
870 * 0x0C 54000 (OFDM)
871 * 0x0D 36000 (OFDM)
872 * 0x0E 18000 (OFDM)
873 * 0x0F 9000 (OFDM)
874 * 0x10 - 17 -Reserved-
875 * 0x18 11000L (CCK)
876 * 0x19 5500L (CCK)
877 * 0x1A 2000L (CCK)
878 * 0x1B 1000L (CCK)
879 * 0x1C 11000S (CCK)
880 * 0x1D 5500S (CCK)
881 * 0x1E 2000S (CCK)
882 * 0x1F -Reserved-
709 * 883 *
710 * rate_code 25 26 27 28 29 30 31 32 884 * "S" indicates CCK rates with short preamble and "L" with long preamble.
711 * rate_kbps 5500 2000 1000 11000S 5500S 2000S ? ?
712 *
713 * "S" indicates CCK rates with short preamble.
714 * 885 *
715 * AR5211 has different rate codes for CCK (802.11B) rates. It only uses the 886 * AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
716 * lowest 4 bits, so they are the same as below with a 0xF mask. 887 * lowest 4 bits, so they are the same as above with a 0xF mask.
717 * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M). 888 * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
718 * We handle this in ath5k_setup_bands(). 889 * We handle this in ath5k_setup_bands().
719 */ 890 */
@@ -733,13 +904,9 @@ struct ath5k_athchan_2ghz {
733#define ATH5K_RATE_CODE_36M 0x0D 904#define ATH5K_RATE_CODE_36M 0x0D
734#define ATH5K_RATE_CODE_48M 0x08 905#define ATH5K_RATE_CODE_48M 0x08
735#define ATH5K_RATE_CODE_54M 0x0C 906#define ATH5K_RATE_CODE_54M 0x0C
736/* XR */
737#define ATH5K_RATE_CODE_XR_500K 0x07
738#define ATH5K_RATE_CODE_XR_1M 0x02
739#define ATH5K_RATE_CODE_XR_2M 0x06
740#define ATH5K_RATE_CODE_XR_3M 0x01
741 907
742/* adding this flag to rate_code enables short preamble */ 908/* Adding this flag to rate_code on B rates
909 * enables short preamble */
743#define AR5K_SET_SHORT_PREAMBLE 0x04 910#define AR5K_SET_SHORT_PREAMBLE 0x04
744 911
745/* 912/*
@@ -769,49 +936,65 @@ extern int ath5k_modparam_nohwcrypt;
769 936
770/** 937/**
771 * enum ath5k_int - Hardware interrupt masks helpers 938 * enum ath5k_int - Hardware interrupt masks helpers
939 * @AR5K_INT_RXOK: Frame successfully received
940 * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor
941 * @AR5K_INT_RXERR: Frame reception failed
942 * @AR5K_INT_RXNOFRM: No frame received within a specified time period
943 * @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors
944 * @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is
945 * not always fatal, on some chips we can continue operation
946 * without resetting the card, that's why %AR5K_INT_FATAL is not
947 * common for all chips.
948 * @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts
949 *
950 * @AR5K_INT_TXOK: Frame transmission success
951 * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor
952 * @AR5K_INT_TXERR: Frame transmission failure
953 * @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The
954 * Queue Control Unit (QCU) signals an EOL interrupt only if a
955 * descriptor's LinkPtr is NULL. For more details, refer to:
956 * "http://www.freepatentsonline.com/20030225739.html"
957 * @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period
958 * @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. In such case we should
959 * increase the TX trigger threshold.
960 * @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts
772 * 961 *
773 * @AR5K_INT_RX: mask to identify received frame interrupts, of type
774 * AR5K_ISR_RXOK or AR5K_ISR_RXERR
775 * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?)
776 * @AR5K_INT_RXNOFRM: No frame received (?)
777 * @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The
778 * Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's
779 * LinkPtr is NULL. For more details, refer to:
780 * http://www.freepatentsonline.com/20030225739.html
781 * @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors).
782 * Note that Rx overrun is not always fatal, on some chips we can continue
783 * operation without resetting the card, that's why int_fatal is not
784 * common for all chips.
785 * @AR5K_INT_TX: mask to identify received frame interrupts, of type
786 * AR5K_ISR_TXOK or AR5K_ISR_TXERR
787 * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?)
788 * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
789 * We currently do increments on interrupt by
790 * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
791 * @AR5K_INT_MIB: Indicates the either Management Information Base counters or 962 * @AR5K_INT_MIB: Indicates the either Management Information Base counters or
792 * one of the PHY error counters reached the maximum value and should be 963 * one of the PHY error counters reached the maximum value and
793 * read and cleared. 964 * should be read and cleared.
965 * @AR5K_INT_SWI: Software triggered interrupt.
794 * @AR5K_INT_RXPHY: RX PHY Error 966 * @AR5K_INT_RXPHY: RX PHY Error
795 * @AR5K_INT_RXKCM: RX Key cache miss 967 * @AR5K_INT_RXKCM: RX Key cache miss
796 * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a 968 * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
797 * beacon that must be handled in software. The alternative is if you 969 * beacon that must be handled in software. The alternative is if
798 * have VEOL support, in that case you let the hardware deal with things. 970 * you have VEOL support, in that case you let the hardware deal
971 * with things.
972 * @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold
799 * @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing 973 * @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing
800 * beacons from the AP have associated with, we should probably try to 974 * beacons from the AP have associated with, we should probably
801 * reassociate. When in IBSS mode this might mean we have not received 975 * try to reassociate. When in IBSS mode this might mean we have
802 * any beacons from any local stations. Note that every station in an 976 * not received any beacons from any local stations. Note that
803 * IBSS schedules to send beacons at the Target Beacon Transmission Time 977 * every station in an IBSS schedules to send beacons at the
804 * (TBTT) with a random backoff. 978 * Target Beacon Transmission Time (TBTT) with a random backoff.
805 * @AR5K_INT_BNR: Beacon Not Ready interrupt - ?? 979 * @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty.
806 * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now 980 * @AR5K_INT_TIM: Beacon with local station's TIM bit set
807 * until properly handled 981 * @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received
808 * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA 982 * @AR5K_INT_DTIM_SYNC: DTIM sync lost
809 * errors. These types of errors we can enable seem to be of type 983 * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to
810 * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR. 984 * our GPIO pins.
985 * @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got noting
986 * @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got
987 * nothing or an incomplete CAB frame sequence.
988 * @AR5K_INT_QCBRORN: A queue got it's CBR counter expired
989 * @AR5K_INT_QCBRURN: A queue got triggered wile empty
990 * @AR5K_INT_QTRIG: A queue got triggered
991 *
992 * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA
993 * errors. Indicates we need to reset the card.
811 * @AR5K_INT_GLOBAL: Used to clear and set the IER 994 * @AR5K_INT_GLOBAL: Used to clear and set the IER
812 * @AR5K_INT_NOCARD: signals the card has been removed 995 * @AR5K_INT_NOCARD: Signals the card has been removed
813 * @AR5K_INT_COMMON: common interrupts shared among MACs with the same 996 * @AR5K_INT_COMMON: Common interrupts shared among MACs with the same
814 * bit value 997 * bit value
815 * 998 *
816 * These are mapped to take advantage of some common bits 999 * These are mapped to take advantage of some common bits
817 * between the MACs, to be able to set intr properties 1000 * between the MACs, to be able to set intr properties
@@ -847,15 +1030,15 @@ enum ath5k_int {
847 AR5K_INT_GPIO = 0x01000000, 1030 AR5K_INT_GPIO = 0x01000000,
848 AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */ 1031 AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */
849 AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */ 1032 AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */
850 AR5K_INT_RX_DOPPLER = 0x08000000, /* Non common */ 1033 AR5K_INT_QCBRORN = 0x08000000, /* Non common */
851 AR5K_INT_QCBRORN = 0x10000000, /* Non common */ 1034 AR5K_INT_QCBRURN = 0x10000000, /* Non common */
852 AR5K_INT_QCBRURN = 0x20000000, /* Non common */ 1035 AR5K_INT_QTRIG = 0x20000000, /* Non common */
853 AR5K_INT_QTRIG = 0x40000000, /* Non common */
854 AR5K_INT_GLOBAL = 0x80000000, 1036 AR5K_INT_GLOBAL = 0x80000000,
855 1037
856 AR5K_INT_TX_ALL = AR5K_INT_TXOK 1038 AR5K_INT_TX_ALL = AR5K_INT_TXOK
857 | AR5K_INT_TXDESC 1039 | AR5K_INT_TXDESC
858 | AR5K_INT_TXERR 1040 | AR5K_INT_TXERR
1041 | AR5K_INT_TXNOFRM
859 | AR5K_INT_TXEOL 1042 | AR5K_INT_TXEOL
860 | AR5K_INT_TXURN, 1043 | AR5K_INT_TXURN,
861 1044
@@ -891,15 +1074,32 @@ enum ath5k_int {
891 AR5K_INT_NOCARD = 0xffffffff 1074 AR5K_INT_NOCARD = 0xffffffff
892}; 1075};
893 1076
894/* mask which calibration is active at the moment */ 1077/**
1078 * enum ath5k_calibration_mask - Mask which calibration is active at the moment
1079 * @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT)
1080 * @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q)
1081 * @AR5K_CALIBRATION_NF: Noise Floor calibration
1082 * @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity
1083 */
895enum ath5k_calibration_mask { 1084enum ath5k_calibration_mask {
896 AR5K_CALIBRATION_FULL = 0x01, 1085 AR5K_CALIBRATION_FULL = 0x01,
897 AR5K_CALIBRATION_SHORT = 0x02, 1086 AR5K_CALIBRATION_SHORT = 0x02,
898 AR5K_CALIBRATION_ANI = 0x04, 1087 AR5K_CALIBRATION_NF = 0x04,
1088 AR5K_CALIBRATION_ANI = 0x08,
899}; 1089};
900 1090
901/* 1091/**
902 * Power management 1092 * enum ath5k_power_mode - Power management modes
1093 * @AR5K_PM_UNDEFINED: Undefined
1094 * @AR5K_PM_AUTO: Allow card to sleep if possible
1095 * @AR5K_PM_AWAKE: Force card to wake up
1096 * @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS)
1097 * @AR5K_PM_NETWORK_SLEEP: Allow to sleep for a specified duration
1098 *
1099 * Currently only PM_AWAKE is used, FULL_SLEEP and NETWORK_SLEEP/AUTO
1100 * are also known to have problems on some cards. This is not a big
1101 * problem though because we can have almost the same effect as
1102 * FULL_SLEEP by putting card on warm reset (it's almost powered down).
903 */ 1103 */
904enum ath5k_power_mode { 1104enum ath5k_power_mode {
905 AR5K_PM_UNDEFINED = 0, 1105 AR5K_PM_UNDEFINED = 0,
@@ -957,6 +1157,8 @@ struct ath5k_capabilities {
957 } cap_queues; 1157 } cap_queues;
958 1158
959 bool cap_has_phyerr_counters; 1159 bool cap_has_phyerr_counters;
1160 bool cap_has_mrr_support;
1161 bool cap_needs_2GHz_ovr;
960}; 1162};
961 1163
962/* size of noise floor history (keep it a power of two) */ 1164/* size of noise floor history (keep it a power of two) */
@@ -1072,13 +1274,11 @@ struct ath5k_hw {
1072 dma_addr_t desc_daddr; /* DMA (physical) address */ 1274 dma_addr_t desc_daddr; /* DMA (physical) address */
1073 size_t desc_len; /* size of TX/RX descriptors */ 1275 size_t desc_len; /* size of TX/RX descriptors */
1074 1276
1075 DECLARE_BITMAP(status, 6); 1277 DECLARE_BITMAP(status, 4);
1076#define ATH_STAT_INVALID 0 /* disable hardware accesses */ 1278#define ATH_STAT_INVALID 0 /* disable hardware accesses */
1077#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */ 1279#define ATH_STAT_PROMISC 1
1078#define ATH_STAT_PROMISC 2 1280#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */
1079#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */ 1281#define ATH_STAT_STARTED 3 /* opened & irqs enabled */
1080#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
1081#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */
1082 1282
1083 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ 1283 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
1084 struct ieee80211_channel *curchan; /* current h/w channel */ 1284 struct ieee80211_channel *curchan; /* current h/w channel */
@@ -1097,6 +1297,7 @@ struct ath5k_hw {
1097 led_on; /* pin setting for LED on */ 1297 led_on; /* pin setting for LED on */
1098 1298
1099 struct work_struct reset_work; /* deferred chip reset */ 1299 struct work_struct reset_work; /* deferred chip reset */
1300 struct work_struct calib_work; /* deferred phy calibration */
1100 1301
1101 struct list_head rxbuf; /* receive buffer */ 1302 struct list_head rxbuf; /* receive buffer */
1102 spinlock_t rxbuflock; 1303 spinlock_t rxbuflock;
@@ -1113,8 +1314,6 @@ struct ath5k_hw {
1113 1314
1114 struct ath5k_rfkill rf_kill; 1315 struct ath5k_rfkill rf_kill;
1115 1316
1116 struct tasklet_struct calib; /* calibration tasklet */
1117
1118 spinlock_t block; /* protects beacon */ 1317 spinlock_t block; /* protects beacon */
1119 struct tasklet_struct beacontq; /* beacon intr tasklet */ 1318 struct tasklet_struct beacontq; /* beacon intr tasklet */
1120 struct list_head bcbuf; /* beacon buffer */ 1319 struct list_head bcbuf; /* beacon buffer */
@@ -1144,7 +1343,7 @@ struct ath5k_hw {
1144 enum ath5k_int ah_imr; 1343 enum ath5k_int ah_imr;
1145 1344
1146 struct ieee80211_channel *ah_current_channel; 1345 struct ieee80211_channel *ah_current_channel;
1147 bool ah_calibration; 1346 bool ah_iq_cal_needed;
1148 bool ah_single_chip; 1347 bool ah_single_chip;
1149 1348
1150 enum ath5k_version ah_version; 1349 enum ath5k_version ah_version;
@@ -1187,7 +1386,13 @@ struct ath5k_hw {
1187 u32 ah_txq_imr_cbrurn; 1386 u32 ah_txq_imr_cbrurn;
1188 u32 ah_txq_imr_qtrig; 1387 u32 ah_txq_imr_qtrig;
1189 u32 ah_txq_imr_nofrm; 1388 u32 ah_txq_imr_nofrm;
1190 u32 ah_txq_isr; 1389
1390 u32 ah_txq_isr_txok_all;
1391 u32 ah_txq_isr_txurn;
1392 u32 ah_txq_isr_qcborn;
1393 u32 ah_txq_isr_qcburn;
1394 u32 ah_txq_isr_qtrig;
1395
1191 u32 *ah_rf_banks; 1396 u32 *ah_rf_banks;
1192 size_t ah_rf_banks_size; 1397 size_t ah_rf_banks_size;
1193 size_t ah_rf_regs_count; 1398 size_t ah_rf_regs_count;
@@ -1228,8 +1433,8 @@ struct ath5k_hw {
1228 1433
1229 /* Calibration timestamp */ 1434 /* Calibration timestamp */
1230 unsigned long ah_cal_next_full; 1435 unsigned long ah_cal_next_full;
1436 unsigned long ah_cal_next_short;
1231 unsigned long ah_cal_next_ani; 1437 unsigned long ah_cal_next_ani;
1232 unsigned long ah_cal_next_nf;
1233 1438
1234 /* Calibration mask */ 1439 /* Calibration mask */
1235 u8 ah_cal_mask; 1440 u8 ah_cal_mask;
@@ -1338,11 +1543,11 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
1338u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah); 1543u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
1339void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64); 1544void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
1340void ath5k_hw_reset_tsf(struct ath5k_hw *ah); 1545void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
1341void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval); 1546void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon,
1547 u32 interval);
1342bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval); 1548bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
1343/* Init function */ 1549/* Init function */
1344void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode, 1550void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode);
1345 u8 mode);
1346 1551
1347/* Queue Control Unit, DFS Control Unit Functions */ 1552/* Queue Control Unit, DFS Control Unit Functions */
1348int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, 1553int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 91627dd2c26a..d7114c75fe9b 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -27,8 +27,7 @@
27#include "debug.h" 27#include "debug.h"
28 28
29/** 29/**
30 * ath5k_hw_post - Power On Self Test helper function 30 * ath5k_hw_post() - Power On Self Test helper function
31 *
32 * @ah: The &struct ath5k_hw 31 * @ah: The &struct ath5k_hw
33 */ 32 */
34static int ath5k_hw_post(struct ath5k_hw *ah) 33static int ath5k_hw_post(struct ath5k_hw *ah)
@@ -92,8 +91,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
92} 91}
93 92
94/** 93/**
95 * ath5k_hw_init - Check if hw is supported and init the needed structs 94 * ath5k_hw_init() - Check if hw is supported and init the needed structs
96 *
97 * @ah: The &struct ath5k_hw associated with the device 95 * @ah: The &struct ath5k_hw associated with the device
98 * 96 *
99 * Check if the device is supported, perform a POST and initialize the needed 97 * Check if the device is supported, perform a POST and initialize the needed
@@ -298,7 +296,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
298 296
299 /* Reset SERDES to load new settings */ 297 /* Reset SERDES to load new settings */
300 ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET); 298 ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
301 mdelay(1); 299 usleep_range(1000, 1500);
302 } 300 }
303 301
304 /* Get misc capabilities */ 302 /* Get misc capabilities */
@@ -308,11 +306,6 @@ int ath5k_hw_init(struct ath5k_hw *ah)
308 goto err; 306 goto err;
309 } 307 }
310 308
311 if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
312 __clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
313 __clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
314 }
315
316 /* Crypto settings */ 309 /* Crypto settings */
317 common->keymax = (ah->ah_version == AR5K_AR5210 ? 310 common->keymax = (ah->ah_version == AR5K_AR5210 ?
318 AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211); 311 AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
@@ -349,8 +342,7 @@ err:
349} 342}
350 343
351/** 344/**
352 * ath5k_hw_deinit - Free the ath5k_hw struct 345 * ath5k_hw_deinit() - Free the &struct ath5k_hw
353 *
354 * @ah: The &struct ath5k_hw 346 * @ah: The &struct ath5k_hw
355 */ 347 */
356void ath5k_hw_deinit(struct ath5k_hw *ah) 348void ath5k_hw_deinit(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index b346d0492001..178a4dd10316 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -80,6 +80,11 @@ static int modparam_fastchanswitch;
80module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO); 80module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
81MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios."); 81MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
82 82
83static int ath5k_modparam_no_hw_rfkill_switch;
84module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
85 bool, S_IRUGO);
86MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
87
83 88
84/* Module info */ 89/* Module info */
85MODULE_AUTHOR("Jiri Slaby"); 90MODULE_AUTHOR("Jiri Slaby");
@@ -183,7 +188,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
183 { .bitrate = 540, 188 { .bitrate = 540,
184 .hw_value = ATH5K_RATE_CODE_54M, 189 .hw_value = ATH5K_RATE_CODE_54M,
185 .flags = 0 }, 190 .flags = 0 },
186 /* XR missing */
187}; 191};
188 192
189static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) 193static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -721,21 +725,24 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
721 if (ret) 725 if (ret)
722 goto err_unmap; 726 goto err_unmap;
723 727
724 memset(mrr_rate, 0, sizeof(mrr_rate)); 728 /* Set up MRR descriptor */
725 memset(mrr_tries, 0, sizeof(mrr_tries)); 729 if (ah->ah_capabilities.cap_has_mrr_support) {
726 for (i = 0; i < 3; i++) { 730 memset(mrr_rate, 0, sizeof(mrr_rate));
727 rate = ieee80211_get_alt_retry_rate(ah->hw, info, i); 731 memset(mrr_tries, 0, sizeof(mrr_tries));
728 if (!rate) 732 for (i = 0; i < 3; i++) {
729 break; 733 rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
734 if (!rate)
735 break;
730 736
731 mrr_rate[i] = rate->hw_value; 737 mrr_rate[i] = rate->hw_value;
732 mrr_tries[i] = info->control.rates[i + 1].count; 738 mrr_tries[i] = info->control.rates[i + 1].count;
733 } 739 }
734 740
735 ath5k_hw_setup_mrr_tx_desc(ah, ds, 741 ath5k_hw_setup_mrr_tx_desc(ah, ds,
736 mrr_rate[0], mrr_tries[0], 742 mrr_rate[0], mrr_tries[0],
737 mrr_rate[1], mrr_tries[1], 743 mrr_rate[1], mrr_tries[1],
738 mrr_rate[2], mrr_tries[2]); 744 mrr_rate[2], mrr_tries[2]);
745 }
739 746
740 ds->ds_link = 0; 747 ds->ds_link = 0;
741 ds->ds_data = bf->skbaddr; 748 ds->ds_data = bf->skbaddr;
@@ -1689,7 +1696,7 @@ ath5k_tasklet_tx(unsigned long data)
1689 struct ath5k_hw *ah = (void *)data; 1696 struct ath5k_hw *ah = (void *)data;
1690 1697
1691 for (i = 0; i < AR5K_NUM_TX_QUEUES; i++) 1698 for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
1692 if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i))) 1699 if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
1693 ath5k_tx_processq(ah, &ah->txqs[i]); 1700 ath5k_tx_processq(ah, &ah->txqs[i]);
1694 1701
1695 ah->tx_pending = false; 1702 ah->tx_pending = false;
@@ -2005,7 +2012,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
2005 ah->nexttbtt = nexttbtt; 2012 ah->nexttbtt = nexttbtt;
2006 2013
2007 intval |= AR5K_BEACON_ENA; 2014 intval |= AR5K_BEACON_ENA;
2008 ath5k_hw_init_beacon(ah, nexttbtt, intval); 2015 ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
2009 2016
2010 /* 2017 /*
2011 * debugging output last in order to preserve the time critical aspect 2018 * debugging output last in order to preserve the time critical aspect
@@ -2112,16 +2119,29 @@ static void
2112ath5k_intr_calibration_poll(struct ath5k_hw *ah) 2119ath5k_intr_calibration_poll(struct ath5k_hw *ah)
2113{ 2120{
2114 if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) && 2121 if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
2115 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) { 2122 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
2116 /* run ANI only when full calibration is not active */ 2123 !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
2124
2125 /* Run ANI only when calibration is not active */
2126
2117 ah->ah_cal_next_ani = jiffies + 2127 ah->ah_cal_next_ani = jiffies +
2118 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI); 2128 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
2119 tasklet_schedule(&ah->ani_tasklet); 2129 tasklet_schedule(&ah->ani_tasklet);
2120 2130
2121 } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) { 2131 } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
2122 ah->ah_cal_next_full = jiffies + 2132 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
2123 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); 2133 !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
2124 tasklet_schedule(&ah->calib); 2134
2135 /* Run calibration only when another calibration
2136 * is not running.
2137 *
2138 * Note: This is for both full/short calibration,
2139 * if it's time for a full one, ath5k_calibrate_work will deal
2140 * with it. */
2141
2142 ah->ah_cal_next_short = jiffies +
2143 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
2144 ieee80211_queue_work(ah->hw, &ah->calib_work);
2125 } 2145 }
2126 /* we could use SWI to generate enough interrupts to meet our 2146 /* we could use SWI to generate enough interrupts to meet our
2127 * calibration interval requirements, if necessary: 2147 * calibration interval requirements, if necessary:
@@ -2149,69 +2169,110 @@ ath5k_intr(int irq, void *dev_id)
2149 enum ath5k_int status; 2169 enum ath5k_int status;
2150 unsigned int counter = 1000; 2170 unsigned int counter = 1000;
2151 2171
2172
2173 /*
2174 * If hw is not ready (or detached) and we get an
2175 * interrupt, or if we have no interrupts pending
2176 * (that means it's not for us) skip it.
2177 *
2178 * NOTE: Group 0/1 PCI interface registers are not
2179 * supported on WiSOCs, so we can't check for pending
2180 * interrupts (ISR belongs to another register group
2181 * so we are ok).
2182 */
2152 if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) || 2183 if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
2153 ((ath5k_get_bus_type(ah) != ATH_AHB) && 2184 ((ath5k_get_bus_type(ah) != ATH_AHB) &&
2154 !ath5k_hw_is_intr_pending(ah)))) 2185 !ath5k_hw_is_intr_pending(ah))))
2155 return IRQ_NONE; 2186 return IRQ_NONE;
2156 2187
2188 /** Main loop **/
2157 do { 2189 do {
2158 ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ 2190 ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
2191
2159 ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n", 2192 ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
2160 status, ah->imask); 2193 status, ah->imask);
2194
2195 /*
2196 * Fatal hw error -> Log and reset
2197 *
2198 * Fatal errors are unrecoverable so we have to
2199 * reset the card. These errors include bus and
2200 * dma errors.
2201 */
2161 if (unlikely(status & AR5K_INT_FATAL)) { 2202 if (unlikely(status & AR5K_INT_FATAL)) {
2162 /* 2203
2163 * Fatal errors are unrecoverable.
2164 * Typically these are caused by DMA errors.
2165 */
2166 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2204 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2167 "fatal int, resetting\n"); 2205 "fatal int, resetting\n");
2168 ieee80211_queue_work(ah->hw, &ah->reset_work); 2206 ieee80211_queue_work(ah->hw, &ah->reset_work);
2207
2208 /*
2209 * RX Overrun -> Count and reset if needed
2210 *
2211 * Receive buffers are full. Either the bus is busy or
2212 * the CPU is not fast enough to process all received
2213 * frames.
2214 */
2169 } else if (unlikely(status & AR5K_INT_RXORN)) { 2215 } else if (unlikely(status & AR5K_INT_RXORN)) {
2216
2170 /* 2217 /*
2171 * Receive buffers are full. Either the bus is busy or
2172 * the CPU is not fast enough to process all received
2173 * frames.
2174 * Older chipsets need a reset to come out of this 2218 * Older chipsets need a reset to come out of this
2175 * condition, but we treat it as RX for newer chips. 2219 * condition, but we treat it as RX for newer chips.
2176 * We don't know exactly which versions need a reset - 2220 * We don't know exactly which versions need a reset
2177 * this guess is copied from the HAL. 2221 * this guess is copied from the HAL.
2178 */ 2222 */
2179 ah->stats.rxorn_intr++; 2223 ah->stats.rxorn_intr++;
2224
2180 if (ah->ah_mac_srev < AR5K_SREV_AR5212) { 2225 if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
2181 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2226 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2182 "rx overrun, resetting\n"); 2227 "rx overrun, resetting\n");
2183 ieee80211_queue_work(ah->hw, &ah->reset_work); 2228 ieee80211_queue_work(ah->hw, &ah->reset_work);
2184 } else 2229 } else
2185 ath5k_schedule_rx(ah); 2230 ath5k_schedule_rx(ah);
2231
2186 } else { 2232 } else {
2233
2234 /* Software Beacon Alert -> Schedule beacon tasklet */
2187 if (status & AR5K_INT_SWBA) 2235 if (status & AR5K_INT_SWBA)
2188 tasklet_hi_schedule(&ah->beacontq); 2236 tasklet_hi_schedule(&ah->beacontq);
2189 2237
2190 if (status & AR5K_INT_RXEOL) { 2238 /*
2191 /* 2239 * No more RX descriptors -> Just count
2192 * NB: the hardware should re-read the link when 2240 *
2193 * RXE bit is written, but it doesn't work at 2241 * NB: the hardware should re-read the link when
2194 * least on older hardware revs. 2242 * RXE bit is written, but it doesn't work at
2195 */ 2243 * least on older hardware revs.
2244 */
2245 if (status & AR5K_INT_RXEOL)
2196 ah->stats.rxeol_intr++; 2246 ah->stats.rxeol_intr++;
2197 } 2247
2198 if (status & AR5K_INT_TXURN) { 2248
2199 /* bump tx trigger level */ 2249 /* TX Underrun -> Bump tx trigger level */
2250 if (status & AR5K_INT_TXURN)
2200 ath5k_hw_update_tx_triglevel(ah, true); 2251 ath5k_hw_update_tx_triglevel(ah, true);
2201 } 2252
2253 /* RX -> Schedule rx tasklet */
2202 if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR)) 2254 if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
2203 ath5k_schedule_rx(ah); 2255 ath5k_schedule_rx(ah);
2204 if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC 2256
2205 | AR5K_INT_TXERR | AR5K_INT_TXEOL)) 2257 /* TX -> Schedule tx tasklet */
2258 if (status & (AR5K_INT_TXOK
2259 | AR5K_INT_TXDESC
2260 | AR5K_INT_TXERR
2261 | AR5K_INT_TXEOL))
2206 ath5k_schedule_tx(ah); 2262 ath5k_schedule_tx(ah);
2207 if (status & AR5K_INT_BMISS) { 2263
2208 /* TODO */ 2264 /* Missed beacon -> TODO
2209 } 2265 if (status & AR5K_INT_BMISS)
2266 */
2267
2268 /* MIB event -> Update counters and notify ANI */
2210 if (status & AR5K_INT_MIB) { 2269 if (status & AR5K_INT_MIB) {
2211 ah->stats.mib_intr++; 2270 ah->stats.mib_intr++;
2212 ath5k_hw_update_mib_counters(ah); 2271 ath5k_hw_update_mib_counters(ah);
2213 ath5k_ani_mib_intr(ah); 2272 ath5k_ani_mib_intr(ah);
2214 } 2273 }
2274
2275 /* GPIO -> Notify RFKill layer */
2215 if (status & AR5K_INT_GPIO) 2276 if (status & AR5K_INT_GPIO)
2216 tasklet_schedule(&ah->rf_kill.toggleq); 2277 tasklet_schedule(&ah->rf_kill.toggleq);
2217 2278
@@ -2222,12 +2283,19 @@ ath5k_intr(int irq, void *dev_id)
2222 2283
2223 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0); 2284 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
2224 2285
2286 /*
2287 * Until we handle rx/tx interrupts mask them on IMR
2288 *
2289 * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
2290 * and unset after we 've handled the interrupts.
2291 */
2225 if (ah->rx_pending || ah->tx_pending) 2292 if (ah->rx_pending || ah->tx_pending)
2226 ath5k_set_current_imask(ah); 2293 ath5k_set_current_imask(ah);
2227 2294
2228 if (unlikely(!counter)) 2295 if (unlikely(!counter))
2229 ATH5K_WARN(ah, "too many interrupts, giving up for now\n"); 2296 ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
2230 2297
2298 /* Fire up calibration poll */
2231 ath5k_intr_calibration_poll(ah); 2299 ath5k_intr_calibration_poll(ah);
2232 2300
2233 return IRQ_HANDLED; 2301 return IRQ_HANDLED;
@@ -2238,41 +2306,58 @@ ath5k_intr(int irq, void *dev_id)
2238 * for temperature/environment changes. 2306 * for temperature/environment changes.
2239 */ 2307 */
2240static void 2308static void
2241ath5k_tasklet_calibrate(unsigned long data) 2309ath5k_calibrate_work(struct work_struct *work)
2242{ 2310{
2243 struct ath5k_hw *ah = (void *)data; 2311 struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
2312 calib_work);
2313
2314 /* Should we run a full calibration ? */
2315 if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
2316
2317 ah->ah_cal_next_full = jiffies +
2318 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
2319 ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
2320
2321 ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
2322 "running full calibration\n");
2323
2324 if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
2325 /*
2326 * Rfgain is out of bounds, reset the chip
2327 * to load new gain values.
2328 */
2329 ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2330 "got new rfgain, resetting\n");
2331 ieee80211_queue_work(ah->hw, &ah->reset_work);
2332 }
2333
2334 /* TODO: On full calibration we should stop TX here,
2335 * so that it doesn't interfere (mostly due to gain_f
2336 * calibration that messes with tx packets -see phy.c).
2337 *
2338 * NOTE: Stopping the queues from above is not enough
2339 * to stop TX but saves us from disconecting (at least
2340 * we don't lose packets). */
2341 ieee80211_stop_queues(ah->hw);
2342 } else
2343 ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
2244 2344
2245 /* Only full calibration for now */
2246 ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
2247 2345
2248 ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", 2346 ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
2249 ieee80211_frequency_to_channel(ah->curchan->center_freq), 2347 ieee80211_frequency_to_channel(ah->curchan->center_freq),
2250 ah->curchan->hw_value); 2348 ah->curchan->hw_value);
2251 2349
2252 if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
2253 /*
2254 * Rfgain is out of bounds, reset the chip
2255 * to load new gain values.
2256 */
2257 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
2258 ieee80211_queue_work(ah->hw, &ah->reset_work);
2259 }
2260 if (ath5k_hw_phy_calibrate(ah, ah->curchan)) 2350 if (ath5k_hw_phy_calibrate(ah, ah->curchan))
2261 ATH5K_ERR(ah, "calibration of channel %u failed\n", 2351 ATH5K_ERR(ah, "calibration of channel %u failed\n",
2262 ieee80211_frequency_to_channel( 2352 ieee80211_frequency_to_channel(
2263 ah->curchan->center_freq)); 2353 ah->curchan->center_freq));
2264 2354
2265 /* Noise floor calibration interrupts rx/tx path while I/Q calibration 2355 /* Clear calibration flags */
2266 * doesn't. 2356 if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
2267 * TODO: We should stop TX here, so that it doesn't interfere. 2357 ieee80211_wake_queues(ah->hw);
2268 * Note that stopping the queues is not enough to stop TX! */ 2358 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
2269 if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) { 2359 } else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
2270 ah->ah_cal_next_nf = jiffies + 2360 ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
2271 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
2272 ath5k_hw_update_noise_floor(ah);
2273 }
2274
2275 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
2276} 2361}
2277 2362
2278 2363
@@ -2407,8 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2407 if (ret) 2492 if (ret)
2408 goto err_irq; 2493 goto err_irq;
2409 2494
2410 /* set up multi-rate retry capabilities */ 2495 /* Set up multi-rate retry capabilities */
2411 if (ah->ah_version == AR5K_AR5212) { 2496 if (ah->ah_capabilities.cap_has_mrr_support) {
2412 hw->max_rates = 4; 2497 hw->max_rates = 4;
2413 hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT, 2498 hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
2414 AR5K_INIT_RETRY_LONG); 2499 AR5K_INIT_RETRY_LONG);
@@ -2544,15 +2629,22 @@ int ath5k_start(struct ieee80211_hw *hw)
2544 * and then setup of the interrupt mask. 2629 * and then setup of the interrupt mask.
2545 */ 2630 */
2546 ah->curchan = ah->hw->conf.channel; 2631 ah->curchan = ah->hw->conf.channel;
2547 ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | 2632 ah->imask = AR5K_INT_RXOK
2548 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | 2633 | AR5K_INT_RXERR
2549 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; 2634 | AR5K_INT_RXEOL
2635 | AR5K_INT_RXORN
2636 | AR5K_INT_TXDESC
2637 | AR5K_INT_TXEOL
2638 | AR5K_INT_FATAL
2639 | AR5K_INT_GLOBAL
2640 | AR5K_INT_MIB;
2550 2641
2551 ret = ath5k_reset(ah, NULL, false); 2642 ret = ath5k_reset(ah, NULL, false);
2552 if (ret) 2643 if (ret)
2553 goto done; 2644 goto done;
2554 2645
2555 ath5k_rfkill_hw_start(ah); 2646 if (!ath5k_modparam_no_hw_rfkill_switch)
2647 ath5k_rfkill_hw_start(ah);
2556 2648
2557 /* 2649 /*
2558 * Reset the key cache since some parts do not reset the 2650 * Reset the key cache since some parts do not reset the
@@ -2585,7 +2677,6 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
2585 ah->tx_pending = false; 2677 ah->tx_pending = false;
2586 tasklet_kill(&ah->rxtq); 2678 tasklet_kill(&ah->rxtq);
2587 tasklet_kill(&ah->txtq); 2679 tasklet_kill(&ah->txtq);
2588 tasklet_kill(&ah->calib);
2589 tasklet_kill(&ah->beacontq); 2680 tasklet_kill(&ah->beacontq);
2590 tasklet_kill(&ah->ani_tasklet); 2681 tasklet_kill(&ah->ani_tasklet);
2591} 2682}
@@ -2637,7 +2728,8 @@ void ath5k_stop(struct ieee80211_hw *hw)
2637 2728
2638 cancel_delayed_work_sync(&ah->tx_complete_work); 2729 cancel_delayed_work_sync(&ah->tx_complete_work);
2639 2730
2640 ath5k_rfkill_hw_stop(ah); 2731 if (!ath5k_modparam_no_hw_rfkill_switch)
2732 ath5k_rfkill_hw_stop(ah);
2641} 2733}
2642 2734
2643/* 2735/*
@@ -2689,9 +2781,24 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
2689 2781
2690 ath5k_ani_init(ah, ani_mode); 2782 ath5k_ani_init(ah, ani_mode);
2691 2783
2692 ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100); 2784 /*
2693 ah->ah_cal_next_ani = jiffies; 2785 * Set calibration intervals
2694 ah->ah_cal_next_nf = jiffies; 2786 *
2787 * Note: We don't need to run calibration imediately
2788 * since some initial calibration is done on reset
2789 * even for fast channel switching. Also on scanning
2790 * this will get set again and again and it won't get
2791 * executed unless we connect somewhere and spend some
2792 * time on the channel (that's what calibration needs
2793 * anyway to be accurate).
2794 */
2795 ah->ah_cal_next_full = jiffies +
2796 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
2797 ah->ah_cal_next_ani = jiffies +
2798 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
2799 ah->ah_cal_next_short = jiffies +
2800 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
2801
2695 ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8); 2802 ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
2696 2803
2697 /* clear survey data and cycle counters */ 2804 /* clear survey data and cycle counters */
@@ -2745,20 +2852,6 @@ ath5k_init(struct ieee80211_hw *hw)
2745 2852
2746 2853
2747 /* 2854 /*
2748 * Check if the MAC has multi-rate retry support.
2749 * We do this by trying to setup a fake extended
2750 * descriptor. MACs that don't have support will
2751 * return false w/o doing anything. MACs that do
2752 * support it will return true w/o doing anything.
2753 */
2754 ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
2755
2756 if (ret < 0)
2757 goto err;
2758 if (ret > 0)
2759 __set_bit(ATH_STAT_MRRETRY, ah->status);
2760
2761 /*
2762 * Collect the channel list. The 802.11 layer 2855 * Collect the channel list. The 802.11 layer
2763 * is responsible for filtering this list based 2856 * is responsible for filtering this list based
2764 * on settings like the phy mode and regulatory 2857 * on settings like the phy mode and regulatory
@@ -2841,11 +2934,11 @@ ath5k_init(struct ieee80211_hw *hw)
2841 2934
2842 tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah); 2935 tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
2843 tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah); 2936 tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
2844 tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
2845 tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah); 2937 tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
2846 tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah); 2938 tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
2847 2939
2848 INIT_WORK(&ah->reset_work, ath5k_reset_work); 2940 INIT_WORK(&ah->reset_work, ath5k_reset_work);
2941 INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
2849 INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work); 2942 INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
2850 2943
2851 ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac); 2944 ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 810fba96702b..994169ad39cb 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -85,12 +85,19 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
85 caps->cap_range.range_2ghz_min = 2412; 85 caps->cap_range.range_2ghz_min = 2412;
86 caps->cap_range.range_2ghz_max = 2732; 86 caps->cap_range.range_2ghz_max = 2732;
87 87
88 if (AR5K_EEPROM_HDR_11B(ee_header)) 88 /* Override 2GHz modes on SoCs that need it
89 __set_bit(AR5K_MODE_11B, caps->cap_mode); 89 * NOTE: cap_needs_2GHz_ovr gets set from
90 90 * ath_ahb_probe */
91 if (AR5K_EEPROM_HDR_11G(ee_header) && 91 if (!caps->cap_needs_2GHz_ovr) {
92 ah->ah_version != AR5K_AR5211) 92 if (AR5K_EEPROM_HDR_11B(ee_header))
93 __set_bit(AR5K_MODE_11G, caps->cap_mode); 93 __set_bit(AR5K_MODE_11B,
94 caps->cap_mode);
95
96 if (AR5K_EEPROM_HDR_11G(ee_header) &&
97 ah->ah_version != AR5K_AR5211)
98 __set_bit(AR5K_MODE_11G,
99 caps->cap_mode);
100 }
94 } 101 }
95 } 102 }
96 103
@@ -103,12 +110,18 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
103 else 110 else
104 caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES; 111 caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
105 112
106 /* newer hardware has PHY error counters */ 113 /* Newer hardware has PHY error counters */
107 if (ah->ah_mac_srev >= AR5K_SREV_AR5213A) 114 if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
108 caps->cap_has_phyerr_counters = true; 115 caps->cap_has_phyerr_counters = true;
109 else 116 else
110 caps->cap_has_phyerr_counters = false; 117 caps->cap_has_phyerr_counters = false;
111 118
119 /* MACs since AR5212 have MRR support */
120 if (ah->ah_version == AR5K_AR5212)
121 caps->cap_has_mrr_support = true;
122 else
123 caps->cap_has_mrr_support = false;
124
112 return 0; 125 return 0;
113} 126}
114 127
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index fce8c904eea9..8c5ce8b0c734 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -57,8 +57,9 @@
57 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 57 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
58 * THE POSSIBILITY OF SUCH DAMAGES. 58 * THE POSSIBILITY OF SUCH DAMAGES.
59 */ 59 */
60#include <linux/export.h>
61#include <linux/moduleparam.h>
60 62
61#include <linux/module.h>
62#include <linux/seq_file.h> 63#include <linux/seq_file.h>
63#include <linux/list.h> 64#include <linux/list.h>
64#include "debug.h" 65#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 7f37df3125fd..0a3f916a1ef3 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -141,10 +141,10 @@ ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf);
141 141
142#include <linux/compiler.h> 142#include <linux/compiler.h>
143 143
144static inline void __attribute__ ((format (printf, 3, 4))) 144static inline __printf(3, 4) void
145ATH5K_DBG(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...) {} 145ATH5K_DBG(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...) {}
146 146
147static inline void __attribute__ ((format (printf, 3, 4))) 147static inline __printf(3, 4) void
148ATH5K_DBG_UNLIMIT(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...) 148ATH5K_DBG_UNLIMIT(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...)
149{} 149{}
150 150
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 7e88dda82221..f8bfa3ac2af0 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -26,20 +26,61 @@
26#include "debug.h" 26#include "debug.h"
27 27
28 28
29/**
30 * DOC: Hardware descriptor functions
31 *
32 * Here we handle the processing of the low-level hw descriptors
33 * that hw reads and writes via DMA for each TX and RX attempt (that means
34 * we can also have descriptors for failed TX/RX tries). We have two kind of
35 * descriptors for RX and TX, control descriptors tell the hw how to send or
36 * receive a packet where to read/write it from/to etc and status descriptors
37 * that contain information about how the packet was sent or received (errors
38 * included).
39 *
40 * Descriptor format is not exactly the same for each MAC chip version so we
41 * have function pointers on &struct ath5k_hw we initialize at runtime based on
42 * the chip used.
43 */
44
45
29/************************\ 46/************************\
30* TX Control descriptors * 47* TX Control descriptors *
31\************************/ 48\************************/
32 49
33/* 50/**
34 * Initialize the 2-word tx control descriptor on 5210/5211 51 * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
52 * @ah: The &struct ath5k_hw
53 * @desc: The &struct ath5k_desc
54 * @pkt_len: Frame length in bytes
55 * @hdr_len: Header length in bytes (only used on AR5210)
56 * @padsize: Any padding we've added to the frame length
57 * @type: One of enum ath5k_pkt_type
58 * @tx_power: Tx power in 0.5dB steps
59 * @tx_rate0: HW idx for transmission rate
60 * @tx_tries0: Max number of retransmissions
61 * @key_index: Index on key table to use for encryption
62 * @antenna_mode: Which antenna to use (0 for auto)
63 * @flags: One of AR5K_TXDESC_* flags (desc.h)
64 * @rtscts_rate: HW idx for RTS/CTS transmission rate
65 * @rtscts_duration: What to put on duration field on the header of RTS/CTS
66 *
67 * Internal function to initialize a 2-Word TX control descriptor
68 * found on AR5210 and AR5211 MACs chips.
69 *
70 * Returns 0 on success or -EINVAL on false input
35 */ 71 */
36static int 72static int
37ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, 73ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
38 unsigned int pkt_len, unsigned int hdr_len, int padsize, 74 struct ath5k_desc *desc,
39 enum ath5k_pkt_type type, 75 unsigned int pkt_len, unsigned int hdr_len,
40 unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0, 76 int padsize,
41 unsigned int key_index, unsigned int antenna_mode, unsigned int flags, 77 enum ath5k_pkt_type type,
42 unsigned int rtscts_rate, unsigned int rtscts_duration) 78 unsigned int tx_power,
79 unsigned int tx_rate0, unsigned int tx_tries0,
80 unsigned int key_index,
81 unsigned int antenna_mode,
82 unsigned int flags,
83 unsigned int rtscts_rate, unsigned int rtscts_duration)
43{ 84{
44 u32 frame_type; 85 u32 frame_type;
45 struct ath5k_hw_2w_tx_ctl *tx_ctl; 86 struct ath5k_hw_2w_tx_ctl *tx_ctl;
@@ -172,17 +213,40 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
172 return 0; 213 return 0;
173} 214}
174 215
175/* 216/**
176 * Initialize the 4-word tx control descriptor on 5212 217 * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
218 * @ah: The &struct ath5k_hw
219 * @desc: The &struct ath5k_desc
220 * @pkt_len: Frame length in bytes
221 * @hdr_len: Header length in bytes (only used on AR5210)
222 * @padsize: Any padding we've added to the frame length
223 * @type: One of enum ath5k_pkt_type
224 * @tx_power: Tx power in 0.5dB steps
225 * @tx_rate0: HW idx for transmission rate
226 * @tx_tries0: Max number of retransmissions
227 * @key_index: Index on key table to use for encryption
228 * @antenna_mode: Which antenna to use (0 for auto)
229 * @flags: One of AR5K_TXDESC_* flags (desc.h)
230 * @rtscts_rate: HW idx for RTS/CTS transmission rate
231 * @rtscts_duration: What to put on duration field on the header of RTS/CTS
232 *
233 * Internal function to initialize a 4-Word TX control descriptor
234 * found on AR5212 and later MACs chips.
235 *
236 * Returns 0 on success or -EINVAL on false input
177 */ 237 */
178static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, 238static int
179 struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len, 239ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
180 int padsize, 240 struct ath5k_desc *desc,
181 enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0, 241 unsigned int pkt_len, unsigned int hdr_len,
182 unsigned int tx_tries0, unsigned int key_index, 242 int padsize,
183 unsigned int antenna_mode, unsigned int flags, 243 enum ath5k_pkt_type type,
184 unsigned int rtscts_rate, 244 unsigned int tx_power,
185 unsigned int rtscts_duration) 245 unsigned int tx_rate0, unsigned int tx_tries0,
246 unsigned int key_index,
247 unsigned int antenna_mode,
248 unsigned int flags,
249 unsigned int rtscts_rate, unsigned int rtscts_duration)
186{ 250{
187 struct ath5k_hw_4w_tx_ctl *tx_ctl; 251 struct ath5k_hw_4w_tx_ctl *tx_ctl;
188 unsigned int frame_len; 252 unsigned int frame_len;
@@ -292,13 +356,29 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
292 return 0; 356 return 0;
293} 357}
294 358
295/* 359/**
296 * Initialize a 4-word multi rate retry tx control descriptor on 5212 360 * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
361 * @ah: The &struct ath5k_hw
362 * @desc: The &struct ath5k_desc
363 * @tx_rate1: HW idx for rate used on transmission series 1
364 * @tx_tries1: Max number of retransmissions for transmission series 1
365 * @tx_rate2: HW idx for rate used on transmission series 2
366 * @tx_tries2: Max number of retransmissions for transmission series 2
367 * @tx_rate3: HW idx for rate used on transmission series 3
368 * @tx_tries3: Max number of retransmissions for transmission series 3
369 *
370 * Multi rate retry (MRR) tx control descriptors are available only on AR5212
371 * MACs, they are part of the normal 4-word tx control descriptor (see above)
372 * but we handle them through a separate function for better abstraction.
373 *
374 * Returns 0 on success or -EINVAL on invalid input
297 */ 375 */
298int 376int
299ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, 377ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
300 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, 378 struct ath5k_desc *desc,
301 u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3) 379 u_int tx_rate1, u_int tx_tries1,
380 u_int tx_rate2, u_int tx_tries2,
381 u_int tx_rate3, u_int tx_tries3)
302{ 382{
303 struct ath5k_hw_4w_tx_ctl *tx_ctl; 383 struct ath5k_hw_4w_tx_ctl *tx_ctl;
304 384
@@ -350,11 +430,16 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
350* TX Status descriptors * 430* TX Status descriptors *
351\***********************/ 431\***********************/
352 432
353/* 433/**
354 * Process the tx status descriptor on 5210/5211 434 * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
435 * @ah: The &struct ath5k_hw
436 * @desc: The &struct ath5k_desc
437 * @ts: The &struct ath5k_tx_status
355 */ 438 */
356static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, 439static int
357 struct ath5k_desc *desc, struct ath5k_tx_status *ts) 440ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
441 struct ath5k_desc *desc,
442 struct ath5k_tx_status *ts)
358{ 443{
359 struct ath5k_hw_2w_tx_ctl *tx_ctl; 444 struct ath5k_hw_2w_tx_ctl *tx_ctl;
360 struct ath5k_hw_tx_status *tx_status; 445 struct ath5k_hw_tx_status *tx_status;
@@ -399,11 +484,16 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
399 return 0; 484 return 0;
400} 485}
401 486
402/* 487/**
403 * Process a tx status descriptor on 5212 488 * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
489 * @ah: The &struct ath5k_hw
490 * @desc: The &struct ath5k_desc
491 * @ts: The &struct ath5k_tx_status
404 */ 492 */
405static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, 493static int
406 struct ath5k_desc *desc, struct ath5k_tx_status *ts) 494ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
495 struct ath5k_desc *desc,
496 struct ath5k_tx_status *ts)
407{ 497{
408 struct ath5k_hw_4w_tx_ctl *tx_ctl; 498 struct ath5k_hw_4w_tx_ctl *tx_ctl;
409 struct ath5k_hw_tx_status *tx_status; 499 struct ath5k_hw_tx_status *tx_status;
@@ -460,11 +550,17 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
460* RX Descriptors * 550* RX Descriptors *
461\****************/ 551\****************/
462 552
463/* 553/**
464 * Initialize an rx control descriptor 554 * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
555 * @ah: The &struct ath5k_hw
556 * @desc: The &struct ath5k_desc
557 * @size: RX buffer length in bytes
558 * @flags: One of AR5K_RXDESC_* flags
465 */ 559 */
466int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, 560int
467 u32 size, unsigned int flags) 561ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
562 struct ath5k_desc *desc,
563 u32 size, unsigned int flags)
468{ 564{
469 struct ath5k_hw_rx_ctl *rx_ctl; 565 struct ath5k_hw_rx_ctl *rx_ctl;
470 566
@@ -491,11 +587,22 @@ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
491 return 0; 587 return 0;
492} 588}
493 589
494/* 590/**
495 * Process the rx status descriptor on 5210/5211 591 * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
592 * @ah: The &struct ath5k_hw
593 * @desc: The &struct ath5k_desc
594 * @rs: The &struct ath5k_rx_status
595 *
596 * Internal function used to process an RX status descriptor
597 * on AR5210/5211 MAC.
598 *
 599 * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
600 * frame yet.
496 */ 601 */
497static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah, 602static int
498 struct ath5k_desc *desc, struct ath5k_rx_status *rs) 603ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
604 struct ath5k_desc *desc,
605 struct ath5k_rx_status *rs)
499{ 606{
500 struct ath5k_hw_rx_status *rx_status; 607 struct ath5k_hw_rx_status *rx_status;
501 608
@@ -574,12 +681,22 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
574 return 0; 681 return 0;
575} 682}
576 683
577/* 684/**
578 * Process the rx status descriptor on 5212 685 * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
686 * @ah: The &struct ath5k_hw
687 * @desc: The &struct ath5k_desc
688 * @rs: The &struct ath5k_rx_status
689 *
690 * Internal function used to process an RX status descriptor
691 * on AR5212 and later MAC.
692 *
 693 * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
694 * frame yet.
579 */ 695 */
580static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, 696static int
581 struct ath5k_desc *desc, 697ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
582 struct ath5k_rx_status *rs) 698 struct ath5k_desc *desc,
699 struct ath5k_rx_status *rs)
583{ 700{
584 struct ath5k_hw_rx_status *rx_status; 701 struct ath5k_hw_rx_status *rx_status;
585 u32 rxstat0, rxstat1; 702 u32 rxstat0, rxstat1;
@@ -646,10 +763,16 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
646* Attach * 763* Attach *
647\********/ 764\********/
648 765
649/* 766/**
650 * Init function pointers inside ath5k_hw struct 767 * ath5k_hw_init_desc_functions() - Init function pointers inside ah
768 * @ah: The &struct ath5k_hw
769 *
770 * Maps the internal descriptor functions to the function pointers on ah, used
771 * from above. This is used as an abstraction layer to handle the various chips
772 * the same way.
651 */ 773 */
652int ath5k_hw_init_desc_functions(struct ath5k_hw *ah) 774int
775ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
653{ 776{
654 if (ah->ah_version == AR5K_AR5212) { 777 if (ah->ah_version == AR5K_AR5212) {
655 ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc; 778 ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index cfd529b548f3..8d6c01a49ea3 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -20,25 +20,30 @@
20 * RX/TX descriptor structures 20 * RX/TX descriptor structures
21 */ 21 */
22 22
23/* 23/**
24 * Common hardware RX control descriptor 24 * struct ath5k_hw_rx_ctl - Common hardware RX control descriptor
25 * @rx_control_0: RX control word 0
26 * @rx_control_1: RX control word 1
25 */ 27 */
26struct ath5k_hw_rx_ctl { 28struct ath5k_hw_rx_ctl {
27 u32 rx_control_0; /* RX control word 0 */ 29 u32 rx_control_0;
28 u32 rx_control_1; /* RX control word 1 */ 30 u32 rx_control_1;
29} __packed __aligned(4); 31} __packed __aligned(4);
30 32
31/* RX control word 1 fields/flags */ 33/* RX control word 1 fields/flags */
32#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */ 34#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
33#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */ 35#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */
34 36
35/* 37/**
36 * Common hardware RX status descriptor 38 * struct ath5k_hw_rx_status - Common hardware RX status descriptor
39 * @rx_status_0: RX status word 0
40 * @rx_status_1: RX status word 1
41 *
37 * 5210, 5211 and 5212 differ only in the fields and flags defined below 42 * 5210, 5211 and 5212 differ only in the fields and flags defined below
38 */ 43 */
39struct ath5k_hw_rx_status { 44struct ath5k_hw_rx_status {
40 u32 rx_status_0; /* RX status word 0 */ 45 u32 rx_status_0;
41 u32 rx_status_1; /* RX status word 1 */ 46 u32 rx_status_1;
42} __packed __aligned(4); 47} __packed __aligned(4);
43 48
44/* 5210/5211 */ 49/* 5210/5211 */
@@ -98,17 +103,36 @@ struct ath5k_hw_rx_status {
98 103
99/** 104/**
100 * enum ath5k_phy_error_code - PHY Error codes 105 * enum ath5k_phy_error_code - PHY Error codes
106 * @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error
107 * @AR5K_RX_PHY_ERROR_TIMING: Timing error
108 * @AR5K_RX_PHY_ERROR_PARITY: Illegal parity
109 * @AR5K_RX_PHY_ERROR_RATE: Illegal rate
110 * @AR5K_RX_PHY_ERROR_LENGTH: Illegal length
111 * @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate
112 * @AR5K_RX_PHY_ERROR_SERVICE: Illegal service
113 * @AR5K_RX_PHY_ERROR_TOR: Transmit override receive
114 * @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+]
115 * @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+]
116 * @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+]
117 * @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+]
118 * @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+]
119 * @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+]
120 * @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+]
121 * @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+]
122 * @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+]
123 * @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+]
124 * @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+]
125 * @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+]
101 */ 126 */
102enum ath5k_phy_error_code { 127enum ath5k_phy_error_code {
103 AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */ 128 AR5K_RX_PHY_ERROR_UNDERRUN = 0,
104 AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */ 129 AR5K_RX_PHY_ERROR_TIMING = 1,
105 AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */ 130 AR5K_RX_PHY_ERROR_PARITY = 2,
106 AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */ 131 AR5K_RX_PHY_ERROR_RATE = 3,
107 AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */ 132 AR5K_RX_PHY_ERROR_LENGTH = 4,
108 AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */ 133 AR5K_RX_PHY_ERROR_RADAR = 5,
109 AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */ 134 AR5K_RX_PHY_ERROR_SERVICE = 6,
110 AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */ 135 AR5K_RX_PHY_ERROR_TOR = 7,
111 /* these are specific to the 5212 */
112 AR5K_RX_PHY_ERROR_OFDM_TIMING = 17, 136 AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
113 AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18, 137 AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
114 AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19, 138 AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
@@ -123,12 +147,14 @@ enum ath5k_phy_error_code {
123 AR5K_RX_PHY_ERROR_CCK_RESTART = 31, 147 AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
124}; 148};
125 149
126/* 150/**
127 * 5210/5211 hardware 2-word TX control descriptor 151 * struct ath5k_hw_2w_tx_ctl - 5210/5211 hardware 2-word TX control descriptor
152 * @tx_control_0: TX control word 0
153 * @tx_control_1: TX control word 1
128 */ 154 */
129struct ath5k_hw_2w_tx_ctl { 155struct ath5k_hw_2w_tx_ctl {
130 u32 tx_control_0; /* TX control word 0 */ 156 u32 tx_control_0;
131 u32 tx_control_1; /* TX control word 1 */ 157 u32 tx_control_1;
132} __packed __aligned(4); 158} __packed __aligned(4);
133 159
134/* TX control word 0 fields/flags */ 160/* TX control word 0 fields/flags */
@@ -177,14 +203,18 @@ struct ath5k_hw_2w_tx_ctl {
177#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4 203#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4
178#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4 204#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4
179 205
180/* 206/**
181 * 5212 hardware 4-word TX control descriptor 207 * struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor
208 * @tx_control_0: TX control word 0
209 * @tx_control_1: TX control word 1
210 * @tx_control_2: TX control word 2
211 * @tx_control_3: TX control word 3
182 */ 212 */
183struct ath5k_hw_4w_tx_ctl { 213struct ath5k_hw_4w_tx_ctl {
184 u32 tx_control_0; /* TX control word 0 */ 214 u32 tx_control_0;
185 u32 tx_control_1; /* TX control word 1 */ 215 u32 tx_control_1;
186 u32 tx_control_2; /* TX control word 2 */ 216 u32 tx_control_2;
187 u32 tx_control_3; /* TX control word 3 */ 217 u32 tx_control_3;
188} __packed __aligned(4); 218} __packed __aligned(4);
189 219
190/* TX control word 0 fields/flags */ 220/* TX control word 0 fields/flags */
@@ -238,12 +268,14 @@ struct ath5k_hw_4w_tx_ctl {
238#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */ 268#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */
239#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20 269#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20
240 270
241/* 271/**
242 * Common TX status descriptor 272 * struct ath5k_hw_tx_status - Common TX status descriptor
273 * @tx_status_0: TX status word 0
274 * @tx_status_1: TX status word 1
243 */ 275 */
244struct ath5k_hw_tx_status { 276struct ath5k_hw_tx_status {
245 u32 tx_status_0; /* TX status word 0 */ 277 u32 tx_status_0;
246 u32 tx_status_1; /* TX status word 1 */ 278 u32 tx_status_1;
247} __packed __aligned(4); 279} __packed __aligned(4);
248 280
249/* TX status word 0 fields/flags */ 281/* TX status word 0 fields/flags */
@@ -276,37 +308,47 @@ struct ath5k_hw_tx_status {
276#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */ 308#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
277#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */ 309#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
278 310
279/* 311/**
280 * 5210/5211 hardware TX descriptor 312 * struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor
313 * @tx_ctl: The &struct ath5k_hw_2w_tx_ctl
314 * @tx_stat: The &struct ath5k_hw_tx_status
281 */ 315 */
282struct ath5k_hw_5210_tx_desc { 316struct ath5k_hw_5210_tx_desc {
283 struct ath5k_hw_2w_tx_ctl tx_ctl; 317 struct ath5k_hw_2w_tx_ctl tx_ctl;
284 struct ath5k_hw_tx_status tx_stat; 318 struct ath5k_hw_tx_status tx_stat;
285} __packed __aligned(4); 319} __packed __aligned(4);
286 320
287/* 321/**
288 * 5212 hardware TX descriptor 322 * struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor
323 * @tx_ctl: The &struct ath5k_hw_4w_tx_ctl
324 * @tx_stat: The &struct ath5k_hw_tx_status
289 */ 325 */
290struct ath5k_hw_5212_tx_desc { 326struct ath5k_hw_5212_tx_desc {
291 struct ath5k_hw_4w_tx_ctl tx_ctl; 327 struct ath5k_hw_4w_tx_ctl tx_ctl;
292 struct ath5k_hw_tx_status tx_stat; 328 struct ath5k_hw_tx_status tx_stat;
293} __packed __aligned(4); 329} __packed __aligned(4);
294 330
295/* 331/**
296 * Common hardware RX descriptor 332 * struct ath5k_hw_all_rx_desc - Common hardware RX descriptor
333 * @rx_ctl: The &struct ath5k_hw_rx_ctl
334 * @rx_stat: The &struct ath5k_hw_rx_status
297 */ 335 */
298struct ath5k_hw_all_rx_desc { 336struct ath5k_hw_all_rx_desc {
299 struct ath5k_hw_rx_ctl rx_ctl; 337 struct ath5k_hw_rx_ctl rx_ctl;
300 struct ath5k_hw_rx_status rx_stat; 338 struct ath5k_hw_rx_status rx_stat;
301} __packed __aligned(4); 339} __packed __aligned(4);
302 340
303/* 341/**
304 * Atheros hardware DMA descriptor 342 * struct ath5k_desc - Atheros hardware DMA descriptor
343 * @ds_link: Physical address of the next descriptor
344 * @ds_data: Physical address of data buffer (skb)
345 * @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc
346 *
305 * This is read and written to by the hardware 347 * This is read and written to by the hardware
306 */ 348 */
307struct ath5k_desc { 349struct ath5k_desc {
308 u32 ds_link; /* physical address of the next descriptor */ 350 u32 ds_link;
309 u32 ds_data; /* physical address of data buffer (skb) */ 351 u32 ds_data;
310 352
311 union { 353 union {
312 struct ath5k_hw_5210_tx_desc ds_tx5210; 354 struct ath5k_hw_5210_tx_desc ds_tx5210;
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 2481f9c7f4b6..5cc9aa814697 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -20,16 +20,13 @@
20* DMA and interrupt masking functions * 20* DMA and interrupt masking functions *
21\*************************************/ 21\*************************************/
22 22
23/* 23/**
24 * dma.c - DMA and interrupt masking functions 24 * DOC: DMA and interrupt masking functions
25 * 25 *
26 * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and 26 * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
27 * handle queue setup for 5210 chipset (rest are handled on qcu.c). 27 * handle queue setup for 5210 chipset (rest are handled on qcu.c).
28 * Also we setup interrupt mask register (IMR) and read the various interrupt 28 * Also we setup interrupt mask register (IMR) and read the various interrupt
29 * status registers (ISR). 29 * status registers (ISR).
30 *
31 * TODO: Handle SISR on 5211+ and introduce a function to return the queue
32 * number that resulted the interrupt.
33 */ 30 */
34 31
35#include "ath5k.h" 32#include "ath5k.h"
@@ -42,22 +39,22 @@
42\*********/ 39\*********/
43 40
44/** 41/**
45 * ath5k_hw_start_rx_dma - Start DMA receive 42 * ath5k_hw_start_rx_dma() - Start DMA receive
46 *
47 * @ah: The &struct ath5k_hw 43 * @ah: The &struct ath5k_hw
48 */ 44 */
49void ath5k_hw_start_rx_dma(struct ath5k_hw *ah) 45void
46ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
50{ 47{
51 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR); 48 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
52 ath5k_hw_reg_read(ah, AR5K_CR); 49 ath5k_hw_reg_read(ah, AR5K_CR);
53} 50}
54 51
55/** 52/**
56 * ath5k_hw_stop_rx_dma - Stop DMA receive 53 * ath5k_hw_stop_rx_dma() - Stop DMA receive
57 *
58 * @ah: The &struct ath5k_hw 54 * @ah: The &struct ath5k_hw
59 */ 55 */
60static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) 56static int
57ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
61{ 58{
62 unsigned int i; 59 unsigned int i;
63 60
@@ -79,24 +76,24 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
79} 76}
80 77
81/** 78/**
82 * ath5k_hw_get_rxdp - Get RX Descriptor's address 79 * ath5k_hw_get_rxdp() - Get RX Descriptor's address
83 *
84 * @ah: The &struct ath5k_hw 80 * @ah: The &struct ath5k_hw
85 */ 81 */
86u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah) 82u32
83ath5k_hw_get_rxdp(struct ath5k_hw *ah)
87{ 84{
88 return ath5k_hw_reg_read(ah, AR5K_RXDP); 85 return ath5k_hw_reg_read(ah, AR5K_RXDP);
89} 86}
90 87
91/** 88/**
92 * ath5k_hw_set_rxdp - Set RX Descriptor's address 89 * ath5k_hw_set_rxdp() - Set RX Descriptor's address
93 *
94 * @ah: The &struct ath5k_hw 90 * @ah: The &struct ath5k_hw
95 * @phys_addr: RX descriptor address 91 * @phys_addr: RX descriptor address
96 * 92 *
97 * Returns -EIO if rx is active 93 * Returns -EIO if rx is active
98 */ 94 */
99int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) 95int
96ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
100{ 97{
101 if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) { 98 if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
102 ATH5K_DBG(ah, ATH5K_DEBUG_DMA, 99 ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
@@ -114,8 +111,7 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
114\**********/ 111\**********/
115 112
116/** 113/**
117 * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue 114 * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
118 *
119 * @ah: The &struct ath5k_hw 115 * @ah: The &struct ath5k_hw
120 * @queue: The hw queue number 116 * @queue: The hw queue number
121 * 117 *
@@ -128,7 +124,8 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
128 * NOTE: Must be called after setting up tx control descriptor for that 124 * NOTE: Must be called after setting up tx control descriptor for that
129 * queue (see below). 125 * queue (see below).
130 */ 126 */
131int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) 127int
128ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
132{ 129{
133 u32 tx_queue; 130 u32 tx_queue;
134 131
@@ -177,17 +174,16 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
177} 174}
178 175
179/** 176/**
180 * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue 177 * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
181 *
182 * @ah: The &struct ath5k_hw 178 * @ah: The &struct ath5k_hw
183 * @queue: The hw queue number 179 * @queue: The hw queue number
184 * 180 *
185 * Stop DMA transmit on a specific hw queue and drain queue so we don't 181 * Stop DMA transmit on a specific hw queue and drain queue so we don't
186 * have any pending frames. Returns -EBUSY if we still have pending frames, 182 * have any pending frames. Returns -EBUSY if we still have pending frames,
187 * -EINVAL if queue number is out of range or inactive. 183 * -EINVAL if queue number is out of range or inactive.
188 *
189 */ 184 */
190static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) 185static int
186ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
191{ 187{
192 unsigned int i = 40; 188 unsigned int i = 40;
193 u32 tx_queue, pending; 189 u32 tx_queue, pending;
@@ -320,14 +316,14 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
320} 316}
321 317
322/** 318/**
323 * ath5k_hw_stop_beacon_queue - Stop beacon queue 319 * ath5k_hw_stop_beacon_queue() - Stop beacon queue
324 * 320 * @ah: The &struct ath5k_hw
325 * @ah The &struct ath5k_hw 321 * @queue: The queue number
326 * @queue The queue number
327 * 322 *
328 * Returns -EIO if queue didn't stop 323 * Returns -EIO if queue didn't stop
329 */ 324 */
330int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) 325int
326ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
331{ 327{
332 int ret; 328 int ret;
333 ret = ath5k_hw_stop_tx_dma(ah, queue); 329 ret = ath5k_hw_stop_tx_dma(ah, queue);
@@ -340,8 +336,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
340} 336}
341 337
342/** 338/**
343 * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue 339 * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
344 *
345 * @ah: The &struct ath5k_hw 340 * @ah: The &struct ath5k_hw
346 * @queue: The hw queue number 341 * @queue: The hw queue number
347 * 342 *
@@ -352,7 +347,8 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
352 * 347 *
353 * XXX: Is TXDP read and clear ? 348 * XXX: Is TXDP read and clear ?
354 */ 349 */
355u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) 350u32
351ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
356{ 352{
357 u16 tx_reg; 353 u16 tx_reg;
358 354
@@ -382,10 +378,10 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
382} 378}
383 379
384/** 380/**
385 * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue 381 * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
386 *
387 * @ah: The &struct ath5k_hw 382 * @ah: The &struct ath5k_hw
388 * @queue: The hw queue number 383 * @queue: The hw queue number
384 * @phys_addr: The physical address
389 * 385 *
390 * Set TX descriptor's address for a specific queue. For 5210 we ignore 386 * Set TX descriptor's address for a specific queue. For 5210 we ignore
391 * the queue number and we use tx queue type since we only have 2 queues 387 * the queue number and we use tx queue type since we only have 2 queues
@@ -394,7 +390,8 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
394 * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still 390 * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
395 * active. 391 * active.
396 */ 392 */
397int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) 393int
394ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
398{ 395{
399 u16 tx_reg; 396 u16 tx_reg;
400 397
@@ -435,8 +432,7 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
435} 432}
436 433
437/** 434/**
438 * ath5k_hw_update_tx_triglevel - Update tx trigger level 435 * ath5k_hw_update_tx_triglevel() - Update tx trigger level
439 *
440 * @ah: The &struct ath5k_hw 436 * @ah: The &struct ath5k_hw
441 * @increase: Flag to force increase of trigger level 437 * @increase: Flag to force increase of trigger level
442 * 438 *
@@ -444,15 +440,15 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
444 * buffer (aka FIFO threshold) that is used to indicate when PCU flushes 440 * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
445 * the buffer and transmits its data. Lowering this results sending small 441 * the buffer and transmits its data. Lowering this results sending small
446 * frames more quickly but can lead to tx underruns, raising it a lot can 442 * frames more quickly but can lead to tx underruns, raising it a lot can
447 * result other problems (i think bmiss is related). Right now we start with 443 * result other problems. Right now we start with the lowest possible
448 * the lowest possible (64Bytes) and if we get tx underrun we increase it using 444 * (64Bytes) and if we get tx underrun we increase it using the increase
449 * the increase flag. Returns -EIO if we have reached maximum/minimum. 445 * flag. Returns -EIO if we have reached maximum/minimum.
450 * 446 *
451 * XXX: Link this with tx DMA size ? 447 * XXX: Link this with tx DMA size ?
452 * XXX: Use it to save interrupts ? 448 * XXX2: Use it to save interrupts ?
453 * TODO: Needs testing, i think it's related to bmiss...
454 */ 449 */
455int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase) 450int
451ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
456{ 452{
457 u32 trigger_level, imr; 453 u32 trigger_level, imr;
458 int ret = -EIO; 454 int ret = -EIO;
@@ -498,21 +494,20 @@ done:
498\*******************/ 494\*******************/
499 495
500/** 496/**
501 * ath5k_hw_is_intr_pending - Check if we have pending interrupts 497 * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
502 *
503 * @ah: The &struct ath5k_hw 498 * @ah: The &struct ath5k_hw
504 * 499 *
505 * Check if we have pending interrupts to process. Returns 1 if we 500 * Check if we have pending interrupts to process. Returns 1 if we
506 * have pending interrupts and 0 if we haven't. 501 * have pending interrupts and 0 if we haven't.
507 */ 502 */
508bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah) 503bool
504ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
509{ 505{
510 return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0; 506 return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
511} 507}
512 508
513/** 509/**
514 * ath5k_hw_get_isr - Get interrupt status 510 * ath5k_hw_get_isr() - Get interrupt status
515 *
516 * @ah: The @struct ath5k_hw 511 * @ah: The @struct ath5k_hw
517 * @interrupt_mask: Driver's interrupt mask used to filter out 512 * @interrupt_mask: Driver's interrupt mask used to filter out
518 * interrupts in sw. 513 * interrupts in sw.
@@ -523,62 +518,162 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
523 * being mapped on some standard non hw-specific positions 518 * being mapped on some standard non hw-specific positions
524 * (check out &ath5k_int). 519 * (check out &ath5k_int).
525 * 520 *
526 * NOTE: We use read-and-clear register, so after this function is called ISR 521 * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
527 * is zeroed. 522 * function gets called are cleared on return.
528 */ 523 */
529int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) 524int
525ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
530{ 526{
531 u32 data; 527 u32 data = 0;
532 528
533 /* 529 /*
534 * Read interrupt status from the Interrupt Status register 530 * Read interrupt status from Primary Interrupt
535 * on 5210 531 * Register.
532 *
533 * Note: PISR/SISR Not available on 5210
536 */ 534 */
537 if (ah->ah_version == AR5K_AR5210) { 535 if (ah->ah_version == AR5K_AR5210) {
538 data = ath5k_hw_reg_read(ah, AR5K_ISR); 536 u32 isr = 0;
539 if (unlikely(data == AR5K_INT_NOCARD)) { 537 isr = ath5k_hw_reg_read(ah, AR5K_ISR);
540 *interrupt_mask = data; 538 if (unlikely(isr == AR5K_INT_NOCARD)) {
539 *interrupt_mask = isr;
541 return -ENODEV; 540 return -ENODEV;
542 } 541 }
543 } else { 542
544 /* 543 /*
545 * Read interrupt status from Interrupt 544 * Filter out the non-common bits from the interrupt
546 * Status Register shadow copy (Read And Clear) 545 * status.
547 *
548 * Note: PISR/SISR Not available on 5210
549 */ 546 */
550 data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR); 547 *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
551 if (unlikely(data == AR5K_INT_NOCARD)) { 548
552 *interrupt_mask = data; 549 /* Hanlde INT_FATAL */
550 if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
551 | AR5K_ISR_DPERR)))
552 *interrupt_mask |= AR5K_INT_FATAL;
553
554 /*
555 * XXX: BMISS interrupts may occur after association.
556 * I found this on 5210 code but it needs testing. If this is
557 * true we should disable them before assoc and re-enable them
558 * after a successful assoc + some jiffies.
559 interrupt_mask &= ~AR5K_INT_BMISS;
560 */
561
562 data = isr;
563 } else {
564 u32 pisr = 0;
565 u32 pisr_clear = 0;
566 u32 sisr0 = 0;
567 u32 sisr1 = 0;
568 u32 sisr2 = 0;
569 u32 sisr3 = 0;
570 u32 sisr4 = 0;
571
572 /* Read PISR and SISRs... */
573 pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
574 if (unlikely(pisr == AR5K_INT_NOCARD)) {
575 *interrupt_mask = pisr;
553 return -ENODEV; 576 return -ENODEV;
554 } 577 }
555 }
556 578
557 /* 579 sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
558 * Get abstract interrupt mask (driver-compatible) 580 sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
559 */ 581 sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
560 *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr; 582 sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
583 sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
561 584
562 if (ah->ah_version != AR5K_AR5210) { 585 /*
563 u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2); 586 * PISR holds the logical OR of interrupt bits
587 * from SISR registers:
588 *
589 * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
590 * per-queue bits on SISR0
591 *
592 * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
593 * per-queue bits on SISR1
594 *
595 * TXURN -> Logical OR of TXURN per-queue bits on SISR2
596 *
597 * HIUERR -> Logical OR of MCABT, SSERR and DPER bits on SISR2
598 *
599 * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
600 * BCN_TIMEOUT, CAB_TIMEOUT and DTIM
601 * (and TSFOOR ?) bits on SISR2
602 *
603 * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
604 * QCBRURN per-queue bits on SISR3
605 * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
606 *
607 * If we clean these bits on PISR we 'll also clear all
608 * related bits from SISRs, e.g. if we write the TXOK bit on
609 * PISR we 'll clean all TXOK bits from SISR0 so if a new TXOK
610 * interrupt got fired for another queue while we were reading
611 * the interrupt registers and we write back the TXOK bit on
612 * PISR we 'll lose it. So make sure that we don't write back
613 * on PISR any bits that come from SISRs. Clearing them from
614 * SISRs will also clear PISR so no need to worry here.
615 */
564 616
565 /*HIU = Host Interface Unit (PCI etc)*/ 617 pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
566 if (unlikely(data & (AR5K_ISR_HIUERR)))
567 *interrupt_mask |= AR5K_INT_FATAL;
568 618
569 /*Beacon Not Ready*/ 619 /*
570 if (unlikely(data & (AR5K_ISR_BNR))) 620 * Write to clear them...
571 *interrupt_mask |= AR5K_INT_BNR; 621 * Note: This means that each bit we write back
622 * to the registers will get cleared, leaving the
623 * rest unaffected. So this won't affect new interrupts
624 * we didn't catch while reading/processing, we 'll get
625 * them next time get_isr gets called.
626 */
627 ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
628 ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
629 ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
630 ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
631 ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
632 ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
633 /* Flush previous write */
634 ath5k_hw_reg_read(ah, AR5K_PISR);
572 635
573 if (unlikely(sisr2 & (AR5K_SISR2_SSERR | 636 /*
574 AR5K_SISR2_DPERR | 637 * Filter out the non-common bits from the interrupt
575 AR5K_SISR2_MCABT))) 638 * status.
576 *interrupt_mask |= AR5K_INT_FATAL; 639 */
640 *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
641
642
643 /* We treat TXOK,TXDESC, TXERR and TXEOL
644 * the same way (schedule the tx tasklet)
645 * so we track them all together per queue */
646 if (pisr & AR5K_ISR_TXOK)
647 ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
648 AR5K_SISR0_QCU_TXOK);
577 649
578 if (data & AR5K_ISR_TIM) 650 if (pisr & AR5K_ISR_TXDESC)
651 ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
652 AR5K_SISR0_QCU_TXDESC);
653
654 if (pisr & AR5K_ISR_TXERR)
655 ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
656 AR5K_SISR1_QCU_TXERR);
657
658 if (pisr & AR5K_ISR_TXEOL)
659 ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
660 AR5K_SISR1_QCU_TXEOL);
661
662 /* Currently this is not much usefull since we treat
663 * all queues the same way if we get a TXURN (update
664 * tx trigger level) but we might need it later on*/
665 if (pisr & AR5K_ISR_TXURN)
666 ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
667 AR5K_SISR2_QCU_TXURN);
668
669 /* Misc Beacon related interrupts */
670
671 /* For AR5211 */
672 if (pisr & AR5K_ISR_TIM)
579 *interrupt_mask |= AR5K_INT_TIM; 673 *interrupt_mask |= AR5K_INT_TIM;
580 674
581 if (data & AR5K_ISR_BCNMISC) { 675 /* For AR5212+ */
676 if (pisr & AR5K_ISR_BCNMISC) {
582 if (sisr2 & AR5K_SISR2_TIM) 677 if (sisr2 & AR5K_SISR2_TIM)
583 *interrupt_mask |= AR5K_INT_TIM; 678 *interrupt_mask |= AR5K_INT_TIM;
584 if (sisr2 & AR5K_SISR2_DTIM) 679 if (sisr2 & AR5K_SISR2_DTIM)
@@ -591,63 +686,39 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
591 *interrupt_mask |= AR5K_INT_CAB_TIMEOUT; 686 *interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
592 } 687 }
593 688
594 if (data & AR5K_ISR_RXDOPPLER) 689 /* Below interrupts are unlikely to happen */
595 *interrupt_mask |= AR5K_INT_RX_DOPPLER; 690
596 if (data & AR5K_ISR_QCBRORN) { 691 /* HIU = Host Interface Unit (PCI etc)
692 * Can be one of MCABT, SSERR, DPERR from SISR2 */
693 if (unlikely(pisr & (AR5K_ISR_HIUERR)))
694 *interrupt_mask |= AR5K_INT_FATAL;
695
696 /*Beacon Not Ready*/
697 if (unlikely(pisr & (AR5K_ISR_BNR)))
698 *interrupt_mask |= AR5K_INT_BNR;
699
700 /* A queue got CBR overrun */
701 if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
597 *interrupt_mask |= AR5K_INT_QCBRORN; 702 *interrupt_mask |= AR5K_INT_QCBRORN;
598 ah->ah_txq_isr |= AR5K_REG_MS( 703 ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
599 ath5k_hw_reg_read(ah, AR5K_RAC_SISR3), 704 AR5K_SISR3_QCBRORN);
600 AR5K_SISR3_QCBRORN);
601 } 705 }
602 if (data & AR5K_ISR_QCBRURN) { 706
707 /* A queue got CBR underrun */
708 if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
603 *interrupt_mask |= AR5K_INT_QCBRURN; 709 *interrupt_mask |= AR5K_INT_QCBRURN;
604 ah->ah_txq_isr |= AR5K_REG_MS( 710 ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
605 ath5k_hw_reg_read(ah, AR5K_RAC_SISR3), 711 AR5K_SISR3_QCBRURN);
606 AR5K_SISR3_QCBRURN);
607 } 712 }
608 if (data & AR5K_ISR_QTRIG) { 713
714 /* A queue got triggered */
715 if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
609 *interrupt_mask |= AR5K_INT_QTRIG; 716 *interrupt_mask |= AR5K_INT_QTRIG;
610 ah->ah_txq_isr |= AR5K_REG_MS( 717 ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
611 ath5k_hw_reg_read(ah, AR5K_RAC_SISR4), 718 AR5K_SISR4_QTRIG);
612 AR5K_SISR4_QTRIG);
613 } 719 }
614 720
615 if (data & AR5K_ISR_TXOK) 721 data = pisr;
616 ah->ah_txq_isr |= AR5K_REG_MS(
617 ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
618 AR5K_SISR0_QCU_TXOK);
619
620 if (data & AR5K_ISR_TXDESC)
621 ah->ah_txq_isr |= AR5K_REG_MS(
622 ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
623 AR5K_SISR0_QCU_TXDESC);
624
625 if (data & AR5K_ISR_TXERR)
626 ah->ah_txq_isr |= AR5K_REG_MS(
627 ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
628 AR5K_SISR1_QCU_TXERR);
629
630 if (data & AR5K_ISR_TXEOL)
631 ah->ah_txq_isr |= AR5K_REG_MS(
632 ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
633 AR5K_SISR1_QCU_TXEOL);
634
635 if (data & AR5K_ISR_TXURN)
636 ah->ah_txq_isr |= AR5K_REG_MS(
637 ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
638 AR5K_SISR2_QCU_TXURN);
639 } else {
640 if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
641 | AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
642 *interrupt_mask |= AR5K_INT_FATAL;
643
644 /*
645 * XXX: BMISS interrupts may occur after association.
646 * I found this on 5210 code but it needs testing. If this is
647 * true we should disable them before assoc and re-enable them
648 * after a successful assoc + some jiffies.
649 interrupt_mask &= ~AR5K_INT_BMISS;
650 */
651 } 722 }
652 723
653 /* 724 /*
@@ -661,8 +732,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
661} 732}
662 733
663/** 734/**
664 * ath5k_hw_set_imr - Set interrupt mask 735 * ath5k_hw_set_imr() - Set interrupt mask
665 *
666 * @ah: The &struct ath5k_hw 736 * @ah: The &struct ath5k_hw
667 * @new_mask: The new interrupt mask to be set 737 * @new_mask: The new interrupt mask to be set
668 * 738 *
@@ -670,7 +740,8 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
670 * ath5k_int bits to hw-specific bits to remove abstraction and writing 740 * ath5k_int bits to hw-specific bits to remove abstraction and writing
671 * Interrupt Mask Register. 741 * Interrupt Mask Register.
672 */ 742 */
673enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) 743enum ath5k_int
744ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
674{ 745{
675 enum ath5k_int old_mask, int_mask; 746 enum ath5k_int old_mask, int_mask;
676 747
@@ -697,16 +768,14 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
697 u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2) 768 u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
698 & AR5K_SIMR2_QCU_TXURN; 769 & AR5K_SIMR2_QCU_TXURN;
699 770
771 /* Fatal interrupt abstraction for 5211+ */
700 if (new_mask & AR5K_INT_FATAL) { 772 if (new_mask & AR5K_INT_FATAL) {
701 int_mask |= AR5K_IMR_HIUERR; 773 int_mask |= AR5K_IMR_HIUERR;
702 simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR 774 simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
703 | AR5K_SIMR2_DPERR); 775 | AR5K_SIMR2_DPERR);
704 } 776 }
705 777
706 /*Beacon Not Ready*/ 778 /* Misc beacon related interrupts */
707 if (new_mask & AR5K_INT_BNR)
708 int_mask |= AR5K_INT_BNR;
709
710 if (new_mask & AR5K_INT_TIM) 779 if (new_mask & AR5K_INT_TIM)
711 int_mask |= AR5K_IMR_TIM; 780 int_mask |= AR5K_IMR_TIM;
712 781
@@ -721,8 +790,9 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
721 if (new_mask & AR5K_INT_CAB_TIMEOUT) 790 if (new_mask & AR5K_INT_CAB_TIMEOUT)
722 simr2 |= AR5K_SISR2_CAB_TIMEOUT; 791 simr2 |= AR5K_SISR2_CAB_TIMEOUT;
723 792
724 if (new_mask & AR5K_INT_RX_DOPPLER) 793 /*Beacon Not Ready*/
725 int_mask |= AR5K_IMR_RXDOPPLER; 794 if (new_mask & AR5K_INT_BNR)
795 int_mask |= AR5K_INT_BNR;
726 796
727 /* Note: Per queue interrupt masks 797 /* Note: Per queue interrupt masks
728 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */ 798 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
@@ -730,10 +800,12 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
730 ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2); 800 ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
731 801
732 } else { 802 } else {
803 /* Fatal interrupt abstraction for 5210 */
733 if (new_mask & AR5K_INT_FATAL) 804 if (new_mask & AR5K_INT_FATAL)
734 int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT 805 int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
735 | AR5K_IMR_HIUERR | AR5K_IMR_DPERR); 806 | AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
736 807
808 /* Only common interrupts left for 5210 (no SIMRs) */
737 ath5k_hw_reg_write(ah, int_mask, AR5K_IMR); 809 ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
738 } 810 }
739 811
@@ -760,8 +832,7 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
760\********************/ 832\********************/
761 833
762/** 834/**
763 * ath5k_hw_dma_init - Initialize DMA unit 835 * ath5k_hw_dma_init() - Initialize DMA unit
764 *
765 * @ah: The &struct ath5k_hw 836 * @ah: The &struct ath5k_hw
766 * 837 *
767 * Set DMA size and pre-enable interrupts 838 * Set DMA size and pre-enable interrupts
@@ -770,7 +841,8 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
770 * 841 *
771 * XXX: Save/restore RXDP/TXDP registers ? 842 * XXX: Save/restore RXDP/TXDP registers ?
772 */ 843 */
773void ath5k_hw_dma_init(struct ath5k_hw *ah) 844void
845ath5k_hw_dma_init(struct ath5k_hw *ah)
774{ 846{
775 /* 847 /*
776 * Set Rx/Tx DMA Configuration 848 * Set Rx/Tx DMA Configuration
@@ -799,8 +871,7 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
799} 871}
800 872
801/** 873/**
802 * ath5k_hw_dma_stop - stop DMA unit 874 * ath5k_hw_dma_stop() - stop DMA unit
803 *
804 * @ah: The &struct ath5k_hw 875 * @ah: The &struct ath5k_hw
805 * 876 *
806 * Stop tx/rx DMA and interrupts. Returns 877 * Stop tx/rx DMA and interrupts. Returns
@@ -810,7 +881,8 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
810 * stuck frames on tx queues, only a reset 881 * stuck frames on tx queues, only a reset
811 * can fix that. 882 * can fix that.
812 */ 883 */
813int ath5k_hw_dma_stop(struct ath5k_hw *ah) 884int
885ath5k_hw_dma_stop(struct ath5k_hw *ah)
814{ 886{
815 int i, qmax, err; 887 int i, qmax, err;
816 err = 0; 888 err = 0;
diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
index 859297811914..73d3dd8a306a 100644
--- a/drivers/net/wireless/ath/ath5k/gpio.c
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
@@ -24,10 +24,33 @@
24#include "reg.h" 24#include "reg.h"
25#include "debug.h" 25#include "debug.h"
26 26
27/* 27
28 * Set led state 28/**
29 * DOC: GPIO/LED functions
30 *
31 * Here we control the 6 bidirectional GPIO pins provided by the hw.
32 * We can set a GPIO pin to be an input or an output pin on GPIO control
33 * register and then read or set its status from GPIO data input/output
34 * registers.
35 *
36 * We also control the two LED pins provided by the hw, LED_0 is our
37 * "power" LED and LED_1 is our "network activity" LED but many scenarios
38 * are available from hw. Vendors might also provide LEDs connected to the
39 * GPIO pins, we handle them through the LED subsystem on led.c
40 */
41
42
43/**
44 * ath5k_hw_set_ledstate() - Set led state
45 * @ah: The &struct ath5k_hw
46 * @state: One of AR5K_LED_*
47 *
48 * Used to set the LED blinking state. This only
49 * works for the LED connected to the LED_0, LED_1 pins,
50 * not the GPIO based.
29 */ 51 */
30void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state) 52void
53ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
31{ 54{
32 u32 led; 55 u32 led;
33 /*5210 has different led mode handling*/ 56 /*5210 has different led mode handling*/
@@ -74,10 +97,13 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
74 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210); 97 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
75} 98}
76 99
77/* 100/**
78 * Set GPIO inputs 101 * ath5k_hw_set_gpio_input() - Set GPIO inputs
102 * @ah: The &struct ath5k_hw
103 * @gpio: GPIO pin to set as input
79 */ 104 */
80int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio) 105int
106ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
81{ 107{
82 if (gpio >= AR5K_NUM_GPIO) 108 if (gpio >= AR5K_NUM_GPIO)
83 return -EINVAL; 109 return -EINVAL;
@@ -89,10 +115,13 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
89 return 0; 115 return 0;
90} 116}
91 117
92/* 118/**
93 * Set GPIO outputs 119 * ath5k_hw_set_gpio_output() - Set GPIO outputs
120 * @ah: The &struct ath5k_hw
121 * @gpio: The GPIO pin to set as output
94 */ 122 */
95int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio) 123int
124ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
96{ 125{
97 if (gpio >= AR5K_NUM_GPIO) 126 if (gpio >= AR5K_NUM_GPIO)
98 return -EINVAL; 127 return -EINVAL;
@@ -104,10 +133,13 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
104 return 0; 133 return 0;
105} 134}
106 135
107/* 136/**
108 * Get GPIO state 137 * ath5k_hw_get_gpio() - Get GPIO state
138 * @ah: The &struct ath5k_hw
139 * @gpio: The GPIO pin to read
109 */ 140 */
110u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio) 141u32
142ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
111{ 143{
112 if (gpio >= AR5K_NUM_GPIO) 144 if (gpio >= AR5K_NUM_GPIO)
113 return 0xffffffff; 145 return 0xffffffff;
@@ -117,10 +149,14 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
117 0x1; 149 0x1;
118} 150}
119 151
120/* 152/**
121 * Set GPIO state 153 * ath5k_hw_set_gpio() - Set GPIO state
154 * @ah: The &struct ath5k_hw
155 * @gpio: The GPIO pin to set
156 * @val: Value to set (boolean)
122 */ 157 */
123int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val) 158int
159ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
124{ 160{
125 u32 data; 161 u32 data;
126 162
@@ -138,10 +174,19 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
138 return 0; 174 return 0;
139} 175}
140 176
141/* 177/**
142 * Initialize the GPIO interrupt (RFKill switch) 178 * ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
179 * @ah: The &struct ath5k_hw
180 * @gpio: The GPIO pin to use
181 * @interrupt_level: True to generate interrupt on active pin (high)
182 *
183 * This function is used to set up the GPIO interrupt for the hw RFKill switch.
184 * That switch is connected to a GPIO pin and it's number is stored on EEPROM.
185 * It can either open or close the circuit to indicate that we should disable
186 * RF/Wireless to save power (we also get that from EEPROM).
143 */ 187 */
144void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, 188void
189ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
145 u32 interrupt_level) 190 u32 interrupt_level)
146{ 191{
147 u32 data; 192 u32 data;
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 1ffecc0fd3ed..a1ea78e05b47 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -23,24 +23,27 @@
23#include "reg.h" 23#include "reg.h"
24#include "debug.h" 24#include "debug.h"
25 25
26/* 26/**
27 * Mode-independent initial register writes 27 * struct ath5k_ini - Mode-independent initial register writes
28 * @ini_register: Register address
29 * @ini_value: Default value
30 * @ini_mode: 0 to write 1 to read (and clear)
28 */ 31 */
29
30struct ath5k_ini { 32struct ath5k_ini {
31 u16 ini_register; 33 u16 ini_register;
32 u32 ini_value; 34 u32 ini_value;
33 35
34 enum { 36 enum {
35 AR5K_INI_WRITE = 0, /* Default */ 37 AR5K_INI_WRITE = 0, /* Default */
36 AR5K_INI_READ = 1, /* Cleared on read */ 38 AR5K_INI_READ = 1,
37 } ini_mode; 39 } ini_mode;
38}; 40};
39 41
40/* 42/**
41 * Mode specific initial register values 43 * struct ath5k_ini_mode - Mode specific initial register values
44 * @mode_register: Register address
45 * @mode_value: Set of values for each enum ath5k_driver_mode
42 */ 46 */
43
44struct ath5k_ini_mode { 47struct ath5k_ini_mode {
45 u16 mode_register; 48 u16 mode_register;
46 u32 mode_value[3]; 49 u32 mode_value[3];
@@ -386,11 +389,10 @@ static const struct ath5k_ini ar5211_ini[] = {
386 389
387/* Initial mode-specific settings for AR5211 390/* Initial mode-specific settings for AR5211
388 * 5211 supports OFDM-only g (draft g) but we 391 * 5211 supports OFDM-only g (draft g) but we
389 * need to test it ! 392 * need to test it ! */
390 */
391static const struct ath5k_ini_mode ar5211_ini_mode[] = { 393static const struct ath5k_ini_mode ar5211_ini_mode[] = {
392 { AR5K_TXCFG, 394 { AR5K_TXCFG,
393 /* A/XR B G */ 395 /* A B G */
394 { 0x00000015, 0x0000001d, 0x00000015 } }, 396 { 0x00000015, 0x0000001d, 0x00000015 } },
395 { AR5K_QUEUE_DFS_LOCAL_IFS(0), 397 { AR5K_QUEUE_DFS_LOCAL_IFS(0),
396 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, 398 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
@@ -460,7 +462,7 @@ static const struct ath5k_ini_mode ar5211_ini_mode[] = {
460 { 0x00000010, 0x00000010, 0x00000010 } }, 462 { 0x00000010, 0x00000010, 0x00000010 } },
461}; 463};
462 464
463/* Initial register settings for AR5212 */ 465/* Initial register settings for AR5212 and newer chips */
464static const struct ath5k_ini ar5212_ini_common_start[] = { 466static const struct ath5k_ini ar5212_ini_common_start[] = {
465 { AR5K_RXDP, 0x00000000 }, 467 { AR5K_RXDP, 0x00000000 },
466 { AR5K_RXCFG, 0x00000005 }, 468 { AR5K_RXCFG, 0x00000005 },
@@ -724,7 +726,8 @@ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
724 { 0x00000000, 0x00000000, 0x00000108 } }, 726 { 0x00000000, 0x00000000, 0x00000108 } },
725}; 727};
726 728
727/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */ 729/* Initial mode-specific settings for AR5212 + RF5111
730 * (Written after ar5212_ini) */
728static const struct ath5k_ini_mode rf5111_ini_mode_end[] = { 731static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
729 { AR5K_TXCFG, 732 { AR5K_TXCFG,
730 /* A/XR B G */ 733 /* A/XR B G */
@@ -757,6 +760,7 @@ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
757 { 0x1883800a, 0x1873800a, 0x1883800a } }, 760 { 0x1883800a, 0x1873800a, 0x1883800a } },
758}; 761};
759 762
763/* Common for all modes */
760static const struct ath5k_ini rf5111_ini_common_end[] = { 764static const struct ath5k_ini rf5111_ini_common_end[] = {
761 { AR5K_DCU_FP, 0x00000000 }, 765 { AR5K_DCU_FP, 0x00000000 },
762 { AR5K_PHY_AGC, 0x00000000 }, 766 { AR5K_PHY_AGC, 0x00000000 },
@@ -774,7 +778,9 @@ static const struct ath5k_ini rf5111_ini_common_end[] = {
774 { 0xa23c, 0x13c889af }, 778 { 0xa23c, 0x13c889af },
775}; 779};
776 780
777/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */ 781
782/* Initial mode-specific settings for AR5212 + RF5112
783 * (Written after ar5212_ini) */
778static const struct ath5k_ini_mode rf5112_ini_mode_end[] = { 784static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
779 { AR5K_TXCFG, 785 { AR5K_TXCFG,
780 /* A/XR B G */ 786 /* A/XR B G */
@@ -825,7 +831,9 @@ static const struct ath5k_ini rf5112_ini_common_end[] = {
825 { 0xa23c, 0x13c889af }, 831 { 0xa23c, 0x13c889af },
826}; 832};
827 833
828/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */ 834
835/* Initial mode-specific settings for RF5413/5414
836 * (Written after ar5212_ini) */
829static const struct ath5k_ini_mode rf5413_ini_mode_end[] = { 837static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
830 { AR5K_TXCFG, 838 { AR5K_TXCFG,
831 /* A/XR B G */ 839 /* A/XR B G */
@@ -963,7 +971,8 @@ static const struct ath5k_ini rf5413_ini_common_end[] = {
963 { 0xa384, 0xf3307ff0 }, 971 { 0xa384, 0xf3307ff0 },
964}; 972};
965 973
966/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */ 974/* Initial mode-specific settings for RF2413/2414
975 * (Written after ar5212_ini) */
967/* XXX: a mode ? */ 976/* XXX: a mode ? */
968static const struct ath5k_ini_mode rf2413_ini_mode_end[] = { 977static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
969 { AR5K_TXCFG, 978 { AR5K_TXCFG,
@@ -1085,7 +1094,8 @@ static const struct ath5k_ini rf2413_ini_common_end[] = {
1085 { 0xa384, 0xf3307ff0 }, 1094 { 0xa384, 0xf3307ff0 },
1086}; 1095};
1087 1096
1088/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */ 1097/* Initial mode-specific settings for RF2425
1098 * (Written after ar5212_ini) */
1089/* XXX: a mode ? */ 1099/* XXX: a mode ? */
1090static const struct ath5k_ini_mode rf2425_ini_mode_end[] = { 1100static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
1091 { AR5K_TXCFG, 1101 { AR5K_TXCFG,
@@ -1357,10 +1367,15 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = {
1357}; 1367};
1358 1368
1359 1369
1360/* 1370/**
1361 * Write initial register dump 1371 * ath5k_hw_ini_registers() - Write initial register dump common for all modes
1372 * @ah: The &struct ath5k_hw
1373 * @size: Dump size
1374 * @ini_regs: The array of &struct ath5k_ini
1375 * @skip_pcu: Skip PCU registers
1362 */ 1376 */
1363static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size, 1377static void
1378ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
1364 const struct ath5k_ini *ini_regs, bool skip_pcu) 1379 const struct ath5k_ini *ini_regs, bool skip_pcu)
1365{ 1380{
1366 unsigned int i; 1381 unsigned int i;
@@ -1388,7 +1403,15 @@ static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
1388 } 1403 }
1389} 1404}
1390 1405
1391static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah, 1406/**
1407 * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
1408 * @ah: The &struct ath5k_hw
1409 * @size: Dump size
1410 * @ini_mode: The array of &struct ath5k_ini_mode
1411 * @mode: One of enum ath5k_driver_mode
1412 */
1413static void
1414ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
1392 unsigned int size, const struct ath5k_ini_mode *ini_mode, 1415 unsigned int size, const struct ath5k_ini_mode *ini_mode,
1393 u8 mode) 1416 u8 mode)
1394{ 1417{
@@ -1402,7 +1425,17 @@ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
1402 1425
1403} 1426}
1404 1427
1405int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu) 1428/**
1429 * ath5k_hw_write_initvals() - Write initial chip-specific register dump
1430 * @ah: The &struct ath5k_hw
1431 * @mode: One of enum ath5k_driver_mode
1432 * @skip_pcu: Skip PCU registers
1433 *
1434 * Write initial chip-specific register dump, to get the chipset on a
1435 * clean and ready-to-work state after warm reset.
1436 */
1437int
1438ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
1406{ 1439{
1407 /* 1440 /*
1408 * Write initial register settings 1441 * Write initial register settings
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index c1dff2ced044..849fa060ebc4 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -18,6 +18,7 @@
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/pci-aspm.h> 19#include <linux/pci-aspm.h>
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/module.h>
21#include "../ath.h" 22#include "../ath.h"
22#include "ath5k.h" 23#include "ath5k.h"
23#include "debug.h" 24#include "debug.h"
@@ -97,7 +98,7 @@ ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
97 0xffff); 98 0xffff);
98 return true; 99 return true;
99 } 100 }
100 udelay(15); 101 usleep_range(15, 20);
101 } 102 }
102 103
103 return false; 104 return false;
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index a7eafa3edc21..cebfd6fd31d3 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -30,11 +30,47 @@
30#include "reg.h" 30#include "reg.h"
31#include "debug.h" 31#include "debug.h"
32 32
33/* 33/**
34 * DOC: Protocol Control Unit (PCU) functions
35 *
36 * Protocol control unit is responsible to maintain various protocol
37 * properties before a frame is send and after a frame is received to/from
38 * baseband. To be more specific, PCU handles:
39 *
40 * - Buffering of RX and TX frames (after QCU/DCUs)
41 *
42 * - Encrypting and decrypting (using the built-in engine)
43 *
44 * - Generating ACKs, RTS/CTS frames
45 *
46 * - Maintaining TSF
47 *
48 * - FCS
49 *
50 * - Updating beacon data (with TSF etc)
51 *
52 * - Generating virtual CCA
53 *
54 * - RX/Multicast filtering
55 *
56 * - BSSID filtering
57 *
58 * - Various statistics
59 *
60 * -Different operating modes: AP, STA, IBSS
61 *
62 * Note: Most of these functions can be tweaked/bypassed so you can do
63 * them on sw above for debugging or research. For more infos check out PCU
64 * registers on reg.h.
65 */
66
67/**
68 * DOC: ACK rates
69 *
34 * AR5212+ can use higher rates for ack transmission 70 * AR5212+ can use higher rates for ack transmission
35 * based on current tx rate instead of the base rate. 71 * based on current tx rate instead of the base rate.
36 * It does this to better utilize channel usage. 72 * It does this to better utilize channel usage.
37 * This is a mapping between G rates (that cover both 73 * There is a mapping between G rates (that cover both
38 * CCK and OFDM) and ack rates that we use when setting 74 * CCK and OFDM) and ack rates that we use when setting
39 * rate -> duration table. This mapping is hw-based so 75 * rate -> duration table. This mapping is hw-based so
40 * don't change anything. 76 * don't change anything.
@@ -63,17 +99,18 @@ static const unsigned int ack_rates_high[] =
63\*******************/ 99\*******************/
64 100
65/** 101/**
66 * ath5k_hw_get_frame_duration - Get tx time of a frame 102 * ath5k_hw_get_frame_duration() - Get tx time of a frame
67 *
68 * @ah: The &struct ath5k_hw 103 * @ah: The &struct ath5k_hw
69 * @len: Frame's length in bytes 104 * @len: Frame's length in bytes
70 * @rate: The @struct ieee80211_rate 105 * @rate: The @struct ieee80211_rate
106 * @shortpre: Indicate short preample
71 * 107 *
72 * Calculate tx duration of a frame given it's rate and length 108 * Calculate tx duration of a frame given it's rate and length
73 * It extends ieee80211_generic_frame_duration for non standard 109 * It extends ieee80211_generic_frame_duration for non standard
74 * bwmodes. 110 * bwmodes.
75 */ 111 */
76int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, 112int
113ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
77 int len, struct ieee80211_rate *rate, bool shortpre) 114 int len, struct ieee80211_rate *rate, bool shortpre)
78{ 115{
79 int sifs, preamble, plcp_bits, sym_time; 116 int sifs, preamble, plcp_bits, sym_time;
@@ -129,11 +166,11 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
129} 166}
130 167
131/** 168/**
132 * ath5k_hw_get_default_slottime - Get the default slot time for current mode 169 * ath5k_hw_get_default_slottime() - Get the default slot time for current mode
133 *
134 * @ah: The &struct ath5k_hw 170 * @ah: The &struct ath5k_hw
135 */ 171 */
136unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah) 172unsigned int
173ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
137{ 174{
138 struct ieee80211_channel *channel = ah->ah_current_channel; 175 struct ieee80211_channel *channel = ah->ah_current_channel;
139 unsigned int slot_time; 176 unsigned int slot_time;
@@ -160,11 +197,11 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
160} 197}
161 198
162/** 199/**
163 * ath5k_hw_get_default_sifs - Get the default SIFS for current mode 200 * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
164 *
165 * @ah: The &struct ath5k_hw 201 * @ah: The &struct ath5k_hw
166 */ 202 */
167unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah) 203unsigned int
204ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
168{ 205{
169 struct ieee80211_channel *channel = ah->ah_current_channel; 206 struct ieee80211_channel *channel = ah->ah_current_channel;
170 unsigned int sifs; 207 unsigned int sifs;
@@ -191,17 +228,17 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
191} 228}
192 229
193/** 230/**
194 * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics) 231 * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
195 *
196 * @ah: The &struct ath5k_hw 232 * @ah: The &struct ath5k_hw
197 * 233 *
198 * Reads MIB counters from PCU and updates sw statistics. Is called after a 234 * Reads MIB counters from PCU and updates sw statistics. Is called after a
199 * MIB interrupt, because one of these counters might have reached their maximum 235 * MIB interrupt, because one of these counters might have reached their maximum
200 * and triggered the MIB interrupt, to let us read and clear the counter. 236 * and triggered the MIB interrupt, to let us read and clear the counter.
201 * 237 *
202 * Is called in interrupt context! 238 * NOTE: Is called in interrupt context!
203 */ 239 */
204void ath5k_hw_update_mib_counters(struct ath5k_hw *ah) 240void
241ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
205{ 242{
206 struct ath5k_statistics *stats = &ah->stats; 243 struct ath5k_statistics *stats = &ah->stats;
207 244
@@ -219,10 +256,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
219\******************/ 256\******************/
220 257
221/** 258/**
222 * ath5k_hw_write_rate_duration - fill rate code to duration table 259 * ath5k_hw_write_rate_duration() - Fill rate code to duration table
223 * 260 * @ah: The &struct ath5k_hw
224 * @ah: the &struct ath5k_hw
225 * @mode: one of enum ath5k_driver_mode
226 * 261 *
227 * Write the rate code to duration table upon hw reset. This is a helper for 262 * Write the rate code to duration table upon hw reset. This is a helper for
228 * ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on 263 * ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
@@ -236,7 +271,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
236 * that include all OFDM and CCK rates. 271 * that include all OFDM and CCK rates.
237 * 272 *
238 */ 273 */
239static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah) 274static inline void
275ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
240{ 276{
241 struct ieee80211_rate *rate; 277 struct ieee80211_rate *rate;
242 unsigned int i; 278 unsigned int i;
@@ -280,12 +316,12 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
280} 316}
281 317
282/** 318/**
283 * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU 319 * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
284 *
285 * @ah: The &struct ath5k_hw 320 * @ah: The &struct ath5k_hw
286 * @timeout: Timeout in usec 321 * @timeout: Timeout in usec
287 */ 322 */
288static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) 323static int
324ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
289{ 325{
290 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK)) 326 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
291 <= timeout) 327 <= timeout)
@@ -298,12 +334,12 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
298} 334}
299 335
300/** 336/**
301 * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU 337 * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
302 *
303 * @ah: The &struct ath5k_hw 338 * @ah: The &struct ath5k_hw
304 * @timeout: Timeout in usec 339 * @timeout: Timeout in usec
305 */ 340 */
306static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) 341static int
342ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
307{ 343{
308 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS)) 344 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
309 <= timeout) 345 <= timeout)
@@ -321,14 +357,14 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
321\*******************/ 357\*******************/
322 358
323/** 359/**
324 * ath5k_hw_set_lladdr - Set station id 360 * ath5k_hw_set_lladdr() - Set station id
325 *
326 * @ah: The &struct ath5k_hw 361 * @ah: The &struct ath5k_hw
327 * @mac: The card's mac address 362 * @mac: The card's mac address (array of octets)
328 * 363 *
329 * Set station id on hw using the provided mac address 364 * Set station id on hw using the provided mac address
330 */ 365 */
331int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) 366int
367ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
332{ 368{
333 struct ath_common *common = ath5k_hw_common(ah); 369 struct ath_common *common = ath5k_hw_common(ah);
334 u32 low_id, high_id; 370 u32 low_id, high_id;
@@ -349,14 +385,14 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
349} 385}
350 386
351/** 387/**
352 * ath5k_hw_set_bssid - Set current BSSID on hw 388 * ath5k_hw_set_bssid() - Set current BSSID on hw
353 *
354 * @ah: The &struct ath5k_hw 389 * @ah: The &struct ath5k_hw
355 * 390 *
356 * Sets the current BSSID and BSSID mask we have from the 391 * Sets the current BSSID and BSSID mask we have from the
357 * common struct into the hardware 392 * common struct into the hardware
358 */ 393 */
359void ath5k_hw_set_bssid(struct ath5k_hw *ah) 394void
395ath5k_hw_set_bssid(struct ath5k_hw *ah)
360{ 396{
361 struct ath_common *common = ath5k_hw_common(ah); 397 struct ath_common *common = ath5k_hw_common(ah);
362 u16 tim_offset = 0; 398 u16 tim_offset = 0;
@@ -389,7 +425,23 @@ void ath5k_hw_set_bssid(struct ath5k_hw *ah)
389 ath5k_hw_enable_pspoll(ah, NULL, 0); 425 ath5k_hw_enable_pspoll(ah, NULL, 0);
390} 426}
391 427
392void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) 428/**
429 * ath5k_hw_set_bssid_mask() - Filter out bssids we listen
430 * @ah: The &struct ath5k_hw
431 * @mask: The BSSID mask to set (array of octets)
432 *
433 * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
434 * which bits of the interface's MAC address should be looked at when trying
435 * to decide which packets to ACK. In station mode and AP mode with a single
436 * BSS every bit matters since we lock to only one BSS. In AP mode with
437 * multiple BSSes (virtual interfaces) not every bit matters because hw must
438 * accept frames for all BSSes and so we tweak some bits of our mac address
439 * in order to have multiple BSSes.
440 *
441 * For more information check out ../hw.c of the common ath module.
442 */
443void
444ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
393{ 445{
394 struct ath_common *common = ath5k_hw_common(ah); 446 struct ath_common *common = ath5k_hw_common(ah);
395 447
@@ -400,18 +452,21 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
400 ath_hw_setbssidmask(common); 452 ath_hw_setbssidmask(common);
401} 453}
402 454
403/* 455/**
404 * Set multicast filter 456 * ath5k_hw_set_mcast_filter() - Set multicast filter
457 * @ah: The &struct ath5k_hw
458 * @filter0: Lower 32bits of muticast filter
459 * @filter1: Higher 16bits of multicast filter
405 */ 460 */
406void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) 461void
462ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
407{ 463{
408 ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0); 464 ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
409 ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1); 465 ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
410} 466}
411 467
412/** 468/**
413 * ath5k_hw_get_rx_filter - Get current rx filter 469 * ath5k_hw_get_rx_filter() - Get current rx filter
414 *
415 * @ah: The &struct ath5k_hw 470 * @ah: The &struct ath5k_hw
416 * 471 *
417 * Returns the RX filter by reading rx filter and 472 * Returns the RX filter by reading rx filter and
@@ -420,7 +475,8 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
420 * and pass to the driver. For a list of frame types 475 * and pass to the driver. For a list of frame types
421 * check out reg.h. 476 * check out reg.h.
422 */ 477 */
423u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah) 478u32
479ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
424{ 480{
425 u32 data, filter = 0; 481 u32 data, filter = 0;
426 482
@@ -440,8 +496,7 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
440} 496}
441 497
442/** 498/**
443 * ath5k_hw_set_rx_filter - Set rx filter 499 * ath5k_hw_set_rx_filter() - Set rx filter
444 *
445 * @ah: The &struct ath5k_hw 500 * @ah: The &struct ath5k_hw
446 * @filter: RX filter mask (see reg.h) 501 * @filter: RX filter mask (see reg.h)
447 * 502 *
@@ -449,7 +504,8 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
449 * register on 5212 and newer chips so that we have proper PHY 504 * register on 5212 and newer chips so that we have proper PHY
450 * error reporting. 505 * error reporting.
451 */ 506 */
452void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter) 507void
508ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
453{ 509{
454 u32 data = 0; 510 u32 data = 0;
455 511
@@ -493,13 +549,13 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
493#define ATH5K_MAX_TSF_READ 10 549#define ATH5K_MAX_TSF_READ 10
494 550
495/** 551/**
496 * ath5k_hw_get_tsf64 - Get the full 64bit TSF 552 * ath5k_hw_get_tsf64() - Get the full 64bit TSF
497 *
498 * @ah: The &struct ath5k_hw 553 * @ah: The &struct ath5k_hw
499 * 554 *
500 * Returns the current TSF 555 * Returns the current TSF
501 */ 556 */
502u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah) 557u64
558ath5k_hw_get_tsf64(struct ath5k_hw *ah)
503{ 559{
504 u32 tsf_lower, tsf_upper1, tsf_upper2; 560 u32 tsf_lower, tsf_upper1, tsf_upper2;
505 int i; 561 int i;
@@ -536,28 +592,30 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
536 return ((u64)tsf_upper1 << 32) | tsf_lower; 592 return ((u64)tsf_upper1 << 32) | tsf_lower;
537} 593}
538 594
595#undef ATH5K_MAX_TSF_READ
596
539/** 597/**
540 * ath5k_hw_set_tsf64 - Set a new 64bit TSF 598 * ath5k_hw_set_tsf64() - Set a new 64bit TSF
541 *
542 * @ah: The &struct ath5k_hw 599 * @ah: The &struct ath5k_hw
543 * @tsf64: The new 64bit TSF 600 * @tsf64: The new 64bit TSF
544 * 601 *
545 * Sets the new TSF 602 * Sets the new TSF
546 */ 603 */
547void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64) 604void
605ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
548{ 606{
549 ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32); 607 ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
550 ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32); 608 ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
551} 609}
552 610
553/** 611/**
554 * ath5k_hw_reset_tsf - Force a TSF reset 612 * ath5k_hw_reset_tsf() - Force a TSF reset
555 *
556 * @ah: The &struct ath5k_hw 613 * @ah: The &struct ath5k_hw
557 * 614 *
558 * Forces a TSF reset on PCU 615 * Forces a TSF reset on PCU
559 */ 616 */
560void ath5k_hw_reset_tsf(struct ath5k_hw *ah) 617void
618ath5k_hw_reset_tsf(struct ath5k_hw *ah)
561{ 619{
562 u32 val; 620 u32 val;
563 621
@@ -573,10 +631,17 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
573 ath5k_hw_reg_write(ah, val, AR5K_BEACON); 631 ath5k_hw_reg_write(ah, val, AR5K_BEACON);
574} 632}
575 633
576/* 634/**
577 * Initialize beacon timers 635 * ath5k_hw_init_beacon_timers() - Initialize beacon timers
636 * @ah: The &struct ath5k_hw
637 * @next_beacon: Next TBTT
638 * @interval: Current beacon interval
639 *
640 * This function is used to initialize beacon timers based on current
641 * operation mode and settings.
578 */ 642 */
579void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval) 643void
644ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
580{ 645{
581 u32 timer1, timer2, timer3; 646 u32 timer1, timer2, timer3;
582 647
@@ -655,8 +720,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
655} 720}
656 721
657/** 722/**
658 * ath5k_check_timer_win - Check if timer B is timer A + window 723 * ath5k_check_timer_win() - Check if timer B is timer A + window
659 *
660 * @a: timer a (before b) 724 * @a: timer a (before b)
661 * @b: timer b (after a) 725 * @b: timer b (after a)
662 * @window: difference between a and b 726 * @window: difference between a and b
@@ -686,12 +750,11 @@ ath5k_check_timer_win(int a, int b, int window, int intval)
686} 750}
687 751
688/** 752/**
689 * ath5k_hw_check_beacon_timers - Check if the beacon timers are correct 753 * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
690 *
691 * @ah: The &struct ath5k_hw 754 * @ah: The &struct ath5k_hw
692 * @intval: beacon interval 755 * @intval: beacon interval
693 * 756 *
694 * This is a workaround for IBSS mode: 757 * This is a workaround for IBSS mode
695 * 758 *
696 * The need for this function arises from the fact that we have 4 separate 759 * The need for this function arises from the fact that we have 4 separate
697 * HW timer registers (TIMER0 - TIMER3), which are closely related to the 760 * HW timer registers (TIMER0 - TIMER3), which are closely related to the
@@ -746,14 +809,14 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
746} 809}
747 810
748/** 811/**
749 * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class 812 * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
750 *
751 * @ah: The &struct ath5k_hw 813 * @ah: The &struct ath5k_hw
752 * @coverage_class: IEEE 802.11 coverage class number 814 * @coverage_class: IEEE 802.11 coverage class number
753 * 815 *
754 * Sets IFS intervals and ACK/CTS timeouts for given coverage class. 816 * Sets IFS intervals and ACK/CTS timeouts for given coverage class.
755 */ 817 */
756void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) 818void
819ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
757{ 820{
758 /* As defined by IEEE 802.11-2007 17.3.8.6 */ 821 /* As defined by IEEE 802.11-2007 17.3.8.6 */
759 int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class; 822 int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
@@ -772,8 +835,7 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
772\***************************/ 835\***************************/
773 836
774/** 837/**
775 * ath5k_hw_start_rx_pcu - Start RX engine 838 * ath5k_hw_start_rx_pcu() - Start RX engine
776 *
777 * @ah: The &struct ath5k_hw 839 * @ah: The &struct ath5k_hw
778 * 840 *
779 * Starts RX engine on PCU so that hw can process RXed frames 841 * Starts RX engine on PCU so that hw can process RXed frames
@@ -781,32 +843,33 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
781 * 843 *
782 * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma 844 * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
783 */ 845 */
784void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) 846void
847ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
785{ 848{
786 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); 849 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
787} 850}
788 851
789/** 852/**
790 * at5k_hw_stop_rx_pcu - Stop RX engine 853 * at5k_hw_stop_rx_pcu() - Stop RX engine
791 *
792 * @ah: The &struct ath5k_hw 854 * @ah: The &struct ath5k_hw
793 * 855 *
794 * Stops RX engine on PCU 856 * Stops RX engine on PCU
795 */ 857 */
796void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah) 858void
859ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
797{ 860{
798 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); 861 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
799} 862}
800 863
801/** 864/**
802 * ath5k_hw_set_opmode - Set PCU operating mode 865 * ath5k_hw_set_opmode() - Set PCU operating mode
803 *
804 * @ah: The &struct ath5k_hw 866 * @ah: The &struct ath5k_hw
805 * @op_mode: &enum nl80211_iftype operating mode 867 * @op_mode: One of enum nl80211_iftype
806 * 868 *
807 * Configure PCU for the various operating modes (AP/STA etc) 869 * Configure PCU for the various operating modes (AP/STA etc)
808 */ 870 */
809int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode) 871int
872ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
810{ 873{
811 struct ath_common *common = ath5k_hw_common(ah); 874 struct ath_common *common = ath5k_hw_common(ah);
812 u32 pcu_reg, beacon_reg, low_id, high_id; 875 u32 pcu_reg, beacon_reg, low_id, high_id;
@@ -873,8 +936,17 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
873 return 0; 936 return 0;
874} 937}
875 938
876void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode, 939/**
877 u8 mode) 940 * ath5k_hw_pcu_init() - Initialize PCU
941 * @ah: The &struct ath5k_hw
942 * @op_mode: One of enum nl80211_iftype
943 * @mode: One of enum ath5k_driver_mode
944 *
945 * This function is used to initialize PCU by setting current
946 * operation mode and various other settings.
947 */
948void
949ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
878{ 950{
879 /* Set bssid and bssid mask */ 951 /* Set bssid and bssid mask */
880 ath5k_hw_set_bssid(ah); 952 ath5k_hw_set_bssid(ah);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 01cb72de44cb..e1f8613426a9 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * PHY functions
3 *
4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org> 2 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com> 3 * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com> 4 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
@@ -20,6 +18,10 @@
20 * 18 *
21 */ 19 */
22 20
21/***********************\
22* PHY related functions *
23\***********************/
24
23#include <linux/delay.h> 25#include <linux/delay.h>
24#include <linux/slab.h> 26#include <linux/slab.h>
25#include <asm/unaligned.h> 27#include <asm/unaligned.h>
@@ -31,14 +33,53 @@
31#include "../regd.h" 33#include "../regd.h"
32 34
33 35
36/**
37 * DOC: PHY related functions
38 *
39 * Here we handle the low-level functions related to baseband
40 * and analog frontend (RF) parts. This is by far the most complex
41 * part of the hw code so make sure you know what you are doing.
42 *
43 * Here is a list of what this is all about:
44 *
45 * - Channel setting/switching
46 *
47 * - Automatic Gain Control (AGC) calibration
48 *
49 * - Noise Floor calibration
50 *
51 * - I/Q imbalance calibration (QAM correction)
52 *
53 * - Calibration due to thermal changes (gain_F)
54 *
55 * - Spur noise mitigation
56 *
57 * - RF/PHY initialization for the various operating modes and bwmodes
58 *
59 * - Antenna control
60 *
61 * - TX power control per channel/rate/packet type
62 *
63 * Also have in mind we never got documentation for most of these
64 * functions, what we have comes mostly from Atheros's code, reverse
65 * engineering and patent docs/presentations etc.
66 */
67
68
34/******************\ 69/******************\
35* Helper functions * 70* Helper functions *
36\******************/ 71\******************/
37 72
38/* 73/**
39 * Get the PHY Chip revision 74 * ath5k_hw_radio_revision() - Get the PHY Chip revision
75 * @ah: The &struct ath5k_hw
76 * @band: One of enum ieee80211_band
77 *
78 * Returns the revision number of a 2GHz, 5GHz or single chip
79 * radio.
40 */ 80 */
41u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band) 81u16
82ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
42{ 83{
43 unsigned int i; 84 unsigned int i;
44 u32 srev; 85 u32 srev;
@@ -58,7 +99,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
58 return 0; 99 return 0;
59 } 100 }
60 101
61 mdelay(2); 102 usleep_range(2000, 2500);
62 103
63 /* ...wait until PHY is ready and read the selected radio revision */ 104 /* ...wait until PHY is ready and read the selected radio revision */
64 ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34)); 105 ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
@@ -81,10 +122,16 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
81 return ret; 122 return ret;
82} 123}
83 124
84/* 125/**
85 * Check if a channel is supported 126 * ath5k_channel_ok() - Check if a channel is supported by the hw
127 * @ah: The &struct ath5k_hw
128 * @channel: The &struct ieee80211_channel
129 *
130 * Note: We don't do any regulatory domain checks here, it's just
131 * a sanity check.
86 */ 132 */
87bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel) 133bool
134ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
88{ 135{
89 u16 freq = channel->center_freq; 136 u16 freq = channel->center_freq;
90 137
@@ -101,7 +148,13 @@ bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
101 return false; 148 return false;
102} 149}
103 150
104bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, 151/**
152 * ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise
153 * @ah: The &struct ath5k_hw
154 * @channel: The &struct ieee80211_channel
155 */
156bool
157ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
105 struct ieee80211_channel *channel) 158 struct ieee80211_channel *channel)
106{ 159{
107 u8 refclk_freq; 160 u8 refclk_freq;
@@ -122,11 +175,20 @@ bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
122 return false; 175 return false;
123} 176}
124 177
125/* 178/**
126 * Used to modify RF Banks before writing them to AR5K_RF_BUFFER 179 * ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer
180 * @ah: The &struct ath5k_hw
181 * @rf_regs: The struct ath5k_rf_reg
182 * @val: New value
183 * @reg_id: RF register ID
184 * @set: Indicate we need to swap data
185 *
186 * This is an internal function used to modify RF Banks before
187 * writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more
188 * infos.
127 */ 189 */
128static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah, 190static unsigned int
129 const struct ath5k_rf_reg *rf_regs, 191ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs,
130 u32 val, u8 reg_id, bool set) 192 u32 val, u8 reg_id, bool set)
131{ 193{
132 const struct ath5k_rf_reg *rfreg = NULL; 194 const struct ath5k_rf_reg *rfreg = NULL;
@@ -204,8 +266,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
204} 266}
205 267
206/** 268/**
207 * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212 269 * ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212
208 *
209 * @ah: the &struct ath5k_hw 270 * @ah: the &struct ath5k_hw
210 * @channel: the currently set channel upon reset 271 * @channel: the currently set channel upon reset
211 * 272 *
@@ -216,10 +277,11 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
216 * mantissa and provide these values on hw. 277 * mantissa and provide these values on hw.
217 * 278 *
218 * For more infos i think this patent is related 279 * For more infos i think this patent is related
219 * http://www.freepatentsonline.com/7184495.html 280 * "http://www.freepatentsonline.com/7184495.html"
220 */ 281 */
221static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, 282static inline int
222 struct ieee80211_channel *channel) 283ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
284 struct ieee80211_channel *channel)
223{ 285{
224 /* Get exponent and mantissa and set it */ 286 /* Get exponent and mantissa and set it */
225 u32 coef_scaled, coef_exp, coef_man, 287 u32 coef_scaled, coef_exp, coef_man,
@@ -278,6 +340,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
278 return 0; 340 return 0;
279} 341}
280 342
343/**
344 * ath5k_hw_phy_disable() - Disable PHY
345 * @ah: The &struct ath5k_hw
346 */
281int ath5k_hw_phy_disable(struct ath5k_hw *ah) 347int ath5k_hw_phy_disable(struct ath5k_hw *ah)
282{ 348{
283 /*Just a try M.F.*/ 349 /*Just a try M.F.*/
@@ -286,10 +352,13 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
286 return 0; 352 return 0;
287} 353}
288 354
289/* 355/**
290 * Wait for synth to settle 356 * ath5k_hw_wait_for_synth() - Wait for synth to settle
357 * @ah: The &struct ath5k_hw
358 * @channel: The &struct ieee80211_channel
291 */ 359 */
292static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, 360static void
361ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
293 struct ieee80211_channel *channel) 362 struct ieee80211_channel *channel)
294{ 363{
295 /* 364 /*
@@ -308,9 +377,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
308 delay = delay << 2; 377 delay = delay << 2;
309 /* XXX: /2 on turbo ? Let's be safe 378 /* XXX: /2 on turbo ? Let's be safe
310 * for now */ 379 * for now */
311 udelay(100 + delay); 380 usleep_range(100 + delay, 100 + (2 * delay));
312 } else { 381 } else {
313 mdelay(1); 382 usleep_range(1000, 1500);
314 } 383 }
315} 384}
316 385
@@ -319,7 +388,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
319* RF Gain optimization * 388* RF Gain optimization *
320\**********************/ 389\**********************/
321 390
322/* 391/**
392 * DOC: RF Gain optimization
393 *
323 * This code is used to optimize RF gain on different environments 394 * This code is used to optimize RF gain on different environments
324 * (temperature mostly) based on feedback from a power detector. 395 * (temperature mostly) based on feedback from a power detector.
325 * 396 *
@@ -328,22 +399,22 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
328 * no gain optimization ladder-. 399 * no gain optimization ladder-.
329 * 400 *
330 * For more infos check out this patent doc 401 * For more infos check out this patent doc
331 * http://www.freepatentsonline.com/7400691.html 402 * "http://www.freepatentsonline.com/7400691.html"
332 * 403 *
333 * This paper describes power drops as seen on the receiver due to 404 * This paper describes power drops as seen on the receiver due to
334 * probe packets 405 * probe packets
335 * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues 406 * "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
336 * %20of%20Power%20Control.pdf 407 * %20of%20Power%20Control.pdf"
337 * 408 *
338 * And this is the MadWiFi bug entry related to the above 409 * And this is the MadWiFi bug entry related to the above
339 * http://madwifi-project.org/ticket/1659 410 * "http://madwifi-project.org/ticket/1659"
340 * with various measurements and diagrams 411 * with various measurements and diagrams
341 *
342 * TODO: Deal with power drops due to probes by setting an appropriate
343 * tx power on the probe packets ! Make this part of the calibration process.
344 */ 412 */
345 413
346/* Initialize ah_gain during attach */ 414/**
415 * ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach
416 * @ah: The &struct ath5k_hw
417 */
347int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah) 418int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
348{ 419{
349 /* Initialize the gain optimization values */ 420 /* Initialize the gain optimization values */
@@ -367,17 +438,21 @@ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
367 return 0; 438 return 0;
368} 439}
369 440
370/* Schedule a gain probe check on the next transmitted packet. 441/**
442 * ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet
443 * @ah: The &struct ath5k_hw
444 *
445 * Schedules a gain probe check on the next transmitted packet.
371 * That means our next packet is going to be sent with lower 446 * That means our next packet is going to be sent with lower
372 * tx power and a Peak to Average Power Detector (PAPD) will try 447 * tx power and a Peak to Average Power Detector (PAPD) will try
373 * to measure the gain. 448 * to measure the gain.
374 * 449 *
375 * XXX: How about forcing a tx packet (bypassing PCU arbitrator etc) 450 * TODO: Force a tx packet (bypassing PCU arbitrator etc)
376 * just after we enable the probe so that we don't mess with 451 * just after we enable the probe so that we don't mess with
377 * standard traffic ? Maybe it's time to use sw interrupts and 452 * standard traffic.
378 * a probe tasklet !!!
379 */ 453 */
380static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah) 454static void
455ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
381{ 456{
382 457
383 /* Skip if gain calibration is inactive or 458 /* Skip if gain calibration is inactive or
@@ -395,9 +470,15 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
395 470
396} 471}
397 472
398/* Calculate gain_F measurement correction 473/**
399 * based on the current step for RF5112 rev. 2 */ 474 * ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction
400static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah) 475 * @ah: The &struct ath5k_hw
476 *
477 * Calculate Gain_F measurement correction
478 * based on the current step for RF5112 rev. 2
479 */
480static u32
481ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
401{ 482{
402 u32 mix, step; 483 u32 mix, step;
403 u32 *rf; 484 u32 *rf;
@@ -450,11 +531,19 @@ static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
450 return ah->ah_gain.g_f_corr; 531 return ah->ah_gain.g_f_corr;
451} 532}
452 533
453/* Check if current gain_F measurement is in the range of our 534/**
535 * ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector
536 * @ah: The &struct ath5k_hw
537 *
538 * Check if current gain_F measurement is in the range of our
454 * power detector windows. If we get a measurement outside range 539 * power detector windows. If we get a measurement outside range
455 * we know it's not accurate (detectors can't measure anything outside 540 * we know it's not accurate (detectors can't measure anything outside
456 * their detection window) so we must ignore it */ 541 * their detection window) so we must ignore it.
457static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah) 542 *
543 * Returns true if readback was O.K. or false on failure
544 */
545static bool
546ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
458{ 547{
459 const struct ath5k_rf_reg *rf_regs; 548 const struct ath5k_rf_reg *rf_regs;
460 u32 step, mix_ovr, level[4]; 549 u32 step, mix_ovr, level[4];
@@ -506,9 +595,15 @@ static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
506 ah->ah_gain.g_current <= level[3]); 595 ah->ah_gain.g_current <= level[3]);
507} 596}
508 597
509/* Perform gain_F adjustment by choosing the right set 598/**
510 * of parameters from RF gain optimization ladder */ 599 * ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment
511static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah) 600 * @ah: The &struct ath5k_hw
601 *
602 * Choose the right target gain based on current gain
603 * and RF gain optimization ladder
604 */
605static s8
606ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
512{ 607{
513 const struct ath5k_gain_opt *go; 608 const struct ath5k_gain_opt *go;
514 const struct ath5k_gain_opt_step *g_step; 609 const struct ath5k_gain_opt_step *g_step;
@@ -572,13 +667,18 @@ done:
572 return ret; 667 return ret;
573} 668}
574 669
575/* Main callback for thermal RF gain calibration engine 670/**
671 * ath5k_hw_gainf_calibrate() - Do a gain_F calibration
672 * @ah: The &struct ath5k_hw
673 *
674 * Main callback for thermal RF gain calibration engine
576 * Check for a new gain reading and schedule an adjustment 675 * Check for a new gain reading and schedule an adjustment
577 * if needed. 676 * if needed.
578 * 677 *
579 * TODO: Use sw interrupt to schedule reset if gain_F needs 678 * Returns one of enum ath5k_rfgain codes
580 * adjustment */ 679 */
581enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah) 680enum ath5k_rfgain
681ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
582{ 682{
583 u32 data, type; 683 u32 data, type;
584 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 684 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -638,10 +738,18 @@ done:
638 return ah->ah_gain.g_state; 738 return ah->ah_gain.g_state;
639} 739}
640 740
641/* Write initial RF gain table to set the RF sensitivity 741/**
642 * this one works on all RF chips and has nothing to do 742 * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
643 * with gain_F calibration */ 743 * @ah: The &struct ath5k_hw
644static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band) 744 * @band: One of enum ieee80211_band
745 *
746 * Write initial RF gain table to set the RF sensitivity.
747 *
748 * NOTE: This one works on all RF chips and has nothing to do
749 * with Gain_F calibration
750 */
751static int
752ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
645{ 753{
646 const struct ath5k_ini_rfgain *ath5k_rfg; 754 const struct ath5k_ini_rfgain *ath5k_rfg;
647 unsigned int i, size, index; 755 unsigned int i, size, index;
@@ -688,16 +796,23 @@ static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
688} 796}
689 797
690 798
691
692/********************\ 799/********************\
693* RF Registers setup * 800* RF Registers setup *
694\********************/ 801\********************/
695 802
696/* 803/**
697 * Setup RF registers by writing RF buffer on hw 804 * ath5k_hw_rfregs_init() - Initialize RF register settings
805 * @ah: The &struct ath5k_hw
806 * @channel: The &struct ieee80211_channel
807 * @mode: One of enum ath5k_driver_mode
808 *
809 * Setup RF registers by writing RF buffer on hw. For
810 * more infos on this, check out rfbuffer.h
698 */ 811 */
699static int ath5k_hw_rfregs_init(struct ath5k_hw *ah, 812static int
700 struct ieee80211_channel *channel, unsigned int mode) 813ath5k_hw_rfregs_init(struct ath5k_hw *ah,
814 struct ieee80211_channel *channel,
815 unsigned int mode)
701{ 816{
702 const struct ath5k_rf_reg *rf_regs; 817 const struct ath5k_rf_reg *rf_regs;
703 const struct ath5k_ini_rfbuffer *ini_rfb; 818 const struct ath5k_ini_rfbuffer *ini_rfb;
@@ -1055,19 +1170,18 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
1055 PHY/RF channel functions 1170 PHY/RF channel functions
1056\**************************/ 1171\**************************/
1057 1172
1058/* 1173/**
1059 * Conversion needed for RF5110 1174 * ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110
1175 * @channel: The &struct ieee80211_channel
1176 *
1177 * Map channel frequency to IEEE channel number and convert it
1178 * to an internal channel value used by the RF5110 chipset.
1060 */ 1179 */
1061static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel) 1180static u32
1181ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
1062{ 1182{
1063 u32 athchan; 1183 u32 athchan;
1064 1184
1065 /*
1066 * Convert IEEE channel/MHz to an internal channel value used
1067 * by the AR5210 chipset. This has not been verified with
1068 * newer chipsets like the AR5212A who have a completely
1069 * different RF/PHY part.
1070 */
1071 athchan = (ath5k_hw_bitswap( 1185 athchan = (ath5k_hw_bitswap(
1072 (ieee80211_frequency_to_channel( 1186 (ieee80211_frequency_to_channel(
1073 channel->center_freq) - 24) / 2, 5) 1187 channel->center_freq) - 24) / 2, 5)
@@ -1075,10 +1189,13 @@ static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
1075 return athchan; 1189 return athchan;
1076} 1190}
1077 1191
1078/* 1192/**
1079 * Set channel on RF5110 1193 * ath5k_hw_rf5110_channel() - Set channel frequency on RF5110
1194 * @ah: The &struct ath5k_hw
1195 * @channel: The &struct ieee80211_channel
1080 */ 1196 */
1081static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah, 1197static int
1198ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
1082 struct ieee80211_channel *channel) 1199 struct ieee80211_channel *channel)
1083{ 1200{
1084 u32 data; 1201 u32 data;
@@ -1089,15 +1206,23 @@ static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
1089 data = ath5k_hw_rf5110_chan2athchan(channel); 1206 data = ath5k_hw_rf5110_chan2athchan(channel);
1090 ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER); 1207 ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER);
1091 ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0); 1208 ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0);
1092 mdelay(1); 1209 usleep_range(1000, 1500);
1093 1210
1094 return 0; 1211 return 0;
1095} 1212}
1096 1213
1097/* 1214/**
1098 * Conversion needed for 5111 1215 * ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111
1216 * @ieee: IEEE channel number
1217 * @athchan: The &struct ath5k_athchan_2ghz
1218 *
1219 * In order to enable the RF2111 frequency converter on RF5111/2111 setups
1220 * we need to add some offsets and extra flags to the data values we pass
1221 * on to the PHY. So for every 2GHz channel this function gets called
1222 * to do the conversion.
1099 */ 1223 */
1100static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee, 1224static int
1225ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
1101 struct ath5k_athchan_2ghz *athchan) 1226 struct ath5k_athchan_2ghz *athchan)
1102{ 1227{
1103 int channel; 1228 int channel;
@@ -1123,10 +1248,13 @@ static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
1123 return 0; 1248 return 0;
1124} 1249}
1125 1250
1126/* 1251/**
1127 * Set channel on 5111 1252 * ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111
1253 * @ah: The &struct ath5k_hw
1254 * @channel: The &struct ieee80211_channel
1128 */ 1255 */
1129static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah, 1256static int
1257ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
1130 struct ieee80211_channel *channel) 1258 struct ieee80211_channel *channel)
1131{ 1259{
1132 struct ath5k_athchan_2ghz ath5k_channel_2ghz; 1260 struct ath5k_athchan_2ghz ath5k_channel_2ghz;
@@ -1171,10 +1299,20 @@ static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
1171 return 0; 1299 return 0;
1172} 1300}
1173 1301
1174/* 1302/**
1175 * Set channel on 5112 and newer 1303 * ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer
1304 * @ah: The &struct ath5k_hw
1305 * @channel: The &struct ieee80211_channel
1306 *
1307 * On RF5112/2112 and newer we don't need to do any conversion.
1308 * We pass the frequency value after a few modifications to the
1309 * chip directly.
1310 *
1311 * NOTE: Make sure channel frequency given is within our range or else
1312 * we might damage the chip ! Use ath5k_channel_ok before calling this one.
1176 */ 1313 */
1177static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah, 1314static int
1315ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1178 struct ieee80211_channel *channel) 1316 struct ieee80211_channel *channel)
1179{ 1317{
1180 u32 data, data0, data1, data2; 1318 u32 data, data0, data1, data2;
@@ -1183,17 +1321,37 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1183 data = data0 = data1 = data2 = 0; 1321 data = data0 = data1 = data2 = 0;
1184 c = channel->center_freq; 1322 c = channel->center_freq;
1185 1323
1324 /* My guess based on code:
1325 * 2GHz RF has 2 synth modes, one with a Local Oscillator
1326 * at 2224Hz and one with a LO at 2192Hz. IF is 1520Hz
1327 * (3040/2). data0 is used to set the PLL divider and data1
1328 * selects synth mode. */
1186 if (c < 4800) { 1329 if (c < 4800) {
1330 /* Channel 14 and all frequencies with 2Hz spacing
1331 * below/above (non-standard channels) */
1187 if (!((c - 2224) % 5)) { 1332 if (!((c - 2224) % 5)) {
1333 /* Same as (c - 2224) / 5 */
1188 data0 = ((2 * (c - 704)) - 3040) / 10; 1334 data0 = ((2 * (c - 704)) - 3040) / 10;
1189 data1 = 1; 1335 data1 = 1;
1336 /* Channel 1 and all frequencies with 5Hz spacing
1337 * below/above (standard channels without channel 14) */
1190 } else if (!((c - 2192) % 5)) { 1338 } else if (!((c - 2192) % 5)) {
1339 /* Same as (c - 2192) / 5 */
1191 data0 = ((2 * (c - 672)) - 3040) / 10; 1340 data0 = ((2 * (c - 672)) - 3040) / 10;
1192 data1 = 0; 1341 data1 = 0;
1193 } else 1342 } else
1194 return -EINVAL; 1343 return -EINVAL;
1195 1344
1196 data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8); 1345 data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
1346 /* This is more complex, we have a single synthesizer with
1347 * 4 reference clock settings (?) based on frequency spacing
1348 * and set using data2. LO is at 4800Hz and data0 is again used
1349 * to set some divider.
1350 *
1351 * NOTE: There is an old atheros presentation at Stanford
1352 * that mentions a method called dual direct conversion
1353 * with 1GHz sliding IF for RF5110. Maybe that's what we
1354 * have here, or an updated version. */
1197 } else if ((c % 5) != 2 || c > 5435) { 1355 } else if ((c % 5) != 2 || c > 5435) {
1198 if (!(c % 20) && c >= 5120) { 1356 if (!(c % 20) && c >= 5120) {
1199 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); 1357 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
@@ -1219,10 +1377,16 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1219 return 0; 1377 return 0;
1220} 1378}
1221 1379
1222/* 1380/**
1223 * Set the channel on the RF2425 1381 * ath5k_hw_rf2425_channel() - Set channel frequency on RF2425
1382 * @ah: The &struct ath5k_hw
1383 * @channel: The &struct ieee80211_channel
1384 *
1385 * AR2425/2417 have a different 2GHz RF so code changes
1386 * a little bit from RF5112.
1224 */ 1387 */
1225static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah, 1388static int
1389ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
1226 struct ieee80211_channel *channel) 1390 struct ieee80211_channel *channel)
1227{ 1391{
1228 u32 data, data0, data2; 1392 u32 data, data0, data2;
@@ -1258,10 +1422,16 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
1258 return 0; 1422 return 0;
1259} 1423}
1260 1424
1261/* 1425/**
1262 * Set a channel on the radio chip 1426 * ath5k_hw_channel() - Set a channel on the radio chip
1427 * @ah: The &struct ath5k_hw
1428 * @channel: The &struct ieee80211_channel
1429 *
1430 * This is the main function called to set a channel on the
1431 * radio chip based on the radio chip version.
1263 */ 1432 */
1264static int ath5k_hw_channel(struct ath5k_hw *ah, 1433static int
1434ath5k_hw_channel(struct ath5k_hw *ah,
1265 struct ieee80211_channel *channel) 1435 struct ieee80211_channel *channel)
1266{ 1436{
1267 int ret; 1437 int ret;
@@ -1313,11 +1483,46 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
1313 return 0; 1483 return 0;
1314} 1484}
1315 1485
1486
1316/*****************\ 1487/*****************\
1317 PHY calibration 1488 PHY calibration
1318\*****************/ 1489\*****************/
1319 1490
1320static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah) 1491/**
1492 * DOC: PHY Calibration routines
1493 *
1494 * Noise floor calibration: When we tell the hardware to
1495 * perform a noise floor calibration by setting the
1496 * AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically
1497 * sample-and-hold the minimum noise level seen at the antennas.
1498 * This value is then stored in a ring buffer of recently measured
1499 * noise floor values so we have a moving window of the last few
1500 * samples. The median of the values in the history is then loaded
1501 * into the hardware for its own use for RSSI and CCA measurements.
1502 * This type of calibration doesn't interfere with traffic.
1503 *
1504 * AGC calibration: When we tell the hardware to perform
1505 * an AGC (Automatic Gain Control) calibration by setting the
1506 * AR5K_PHY_AGCCTL_CAL, hw disconnects the antennas and does
1507 * a calibration on the DC offsets of ADCs. During this period
1508 * rx/tx gets disabled so we have to deal with it on the driver
1509 * part.
1510 *
1511 * I/Q calibration: When we tell the hardware to perform
1512 * an I/Q calibration, it tries to correct I/Q imbalance and
1513 * fix QAM constellation by sampling data from rxed frames.
1514 * It doesn't interfere with traffic.
1515 *
1516 * For more infos on AGC and I/Q calibration check out patent doc
1517 * #03/094463.
1518 */
1519
1520/**
1521 * ath5k_hw_read_measured_noise_floor() - Read measured NF from hw
1522 * @ah: The &struct ath5k_hw
1523 */
1524static s32
1525ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
1321{ 1526{
1322 s32 val; 1527 s32 val;
1323 1528
@@ -1325,7 +1530,12 @@ static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
1325 return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8); 1530 return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
1326} 1531}
1327 1532
1328void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah) 1533/**
1534 * ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer
1535 * @ah: The &struct ath5k_hw
1536 */
1537void
1538ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
1329{ 1539{
1330 int i; 1540 int i;
1331 1541
@@ -1334,6 +1544,11 @@ void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
1334 ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE; 1544 ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
1335} 1545}
1336 1546
1547/**
1548 * ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer
1549 * @ah: The &struct ath5k_hw
1550 * @noise_floor: The NF we got from hw
1551 */
1337static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor) 1552static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
1338{ 1553{
1339 struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist; 1554 struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
@@ -1341,7 +1556,12 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
1341 hist->nfval[hist->index] = noise_floor; 1556 hist->nfval[hist->index] = noise_floor;
1342} 1557}
1343 1558
1344static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah) 1559/**
1560 * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer
1561 * @ah: The &struct ath5k_hw
1562 */
1563static s16
1564ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
1345{ 1565{
1346 s16 sort[ATH5K_NF_CAL_HIST_MAX]; 1566 s16 sort[ATH5K_NF_CAL_HIST_MAX];
1347 s16 tmp; 1567 s16 tmp;
@@ -1364,18 +1584,16 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
1364 return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2]; 1584 return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
1365} 1585}
1366 1586
1367/* 1587/**
1368 * When we tell the hardware to perform a noise floor calibration 1588 * ath5k_hw_update_noise_floor() - Update NF on hardware
1369 * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically 1589 * @ah: The &struct ath5k_hw
1370 * sample-and-hold the minimum noise level seen at the antennas.
1371 * This value is then stored in a ring buffer of recently measured
1372 * noise floor values so we have a moving window of the last few
1373 * samples.
1374 * 1590 *
1375 * The median of the values in the history is then loaded into the 1591 * This is the main function we call to perform a NF calibration,
1376 * hardware for its own use for RSSI and CCA measurements. 1592 * it reads NF from hardware, calculates the median and updates
1593 * NF on hw.
1377 */ 1594 */
1378void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) 1595void
1596ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1379{ 1597{
1380 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 1598 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1381 u32 val; 1599 u32 val;
@@ -1390,6 +1608,8 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1390 return; 1608 return;
1391 } 1609 }
1392 1610
1611 ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
1612
1393 ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel); 1613 ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
1394 1614
1395 /* completed NF calibration, test threshold */ 1615 /* completed NF calibration, test threshold */
@@ -1434,20 +1654,29 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1434 1654
1435 ah->ah_noise_floor = nf; 1655 ah->ah_noise_floor = nf;
1436 1656
1657 ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
1658
1437 ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, 1659 ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
1438 "noise floor calibrated: %d\n", nf); 1660 "noise floor calibrated: %d\n", nf);
1439} 1661}
1440 1662
1441/* 1663/**
1442 * Perform a PHY calibration on RF5110 1664 * ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110
1443 * -Fix BPSK/QAM Constellation (I/Q correction) 1665 * @ah: The &struct ath5k_hw
1666 * @channel: The &struct ieee80211_channel
1667 *
1668 * Do a complete PHY calibration (AGC + NF + I/Q) on RF5110
1444 */ 1669 */
1445static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah, 1670static int
1671ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1446 struct ieee80211_channel *channel) 1672 struct ieee80211_channel *channel)
1447{ 1673{
1448 u32 phy_sig, phy_agc, phy_sat, beacon; 1674 u32 phy_sig, phy_agc, phy_sat, beacon;
1449 int ret; 1675 int ret;
1450 1676
1677 if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL))
1678 return 0;
1679
1451 /* 1680 /*
1452 * Disable beacons and RX/TX queues, wait 1681 * Disable beacons and RX/TX queues, wait
1453 */ 1682 */
@@ -1456,7 +1685,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1456 beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210); 1685 beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
1457 ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210); 1686 ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
1458 1687
1459 mdelay(2); 1688 usleep_range(2000, 2500);
1460 1689
1461 /* 1690 /*
1462 * Set the channel (with AGC turned off) 1691 * Set the channel (with AGC turned off)
@@ -1469,7 +1698,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1469 * Activate PHY and wait 1698 * Activate PHY and wait
1470 */ 1699 */
1471 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); 1700 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
1472 mdelay(1); 1701 usleep_range(1000, 1500);
1473 1702
1474 AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE); 1703 AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
1475 1704
@@ -1506,7 +1735,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1506 ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG); 1735 ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG);
1507 AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE); 1736 AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
1508 1737
1509 mdelay(1); 1738 usleep_range(1000, 1500);
1510 1739
1511 /* 1740 /*
1512 * Enable calibration and wait until completion 1741 * Enable calibration and wait until completion
@@ -1537,8 +1766,9 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1537 return 0; 1766 return 0;
1538} 1767}
1539 1768
1540/* 1769/**
1541 * Perform I/Q calibration on RF5111/5112 and newer chips 1770 * ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer
1771 * @ah: The &struct ath5k_hw
1542 */ 1772 */
1543static int 1773static int
1544ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah) 1774ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
@@ -1547,12 +1777,19 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
1547 s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd; 1777 s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
1548 int i; 1778 int i;
1549 1779
1550 if (!ah->ah_calibration || 1780 /* Skip if I/Q calibration is not needed or if it's still running */
1551 ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) 1781 if (!ah->ah_iq_cal_needed)
1552 return 0; 1782 return -EINVAL;
1783 else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) {
1784 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
1785 "I/Q calibration still running");
1786 return -EBUSY;
1787 }
1553 1788
1554 /* Calibration has finished, get the results and re-run */ 1789 /* Calibration has finished, get the results and re-run */
1555 /* work around empty results which can apparently happen on 5212 */ 1790
1791 /* Work around for empty results which can apparently happen on 5212:
1792 * Read registers up to 10 times until we get both i_pr and q_pwr */
1556 for (i = 0; i <= 10; i++) { 1793 for (i = 0; i <= 10; i++) {
1557 iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR); 1794 iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
1558 i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I); 1795 i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
@@ -1570,9 +1807,13 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
1570 else 1807 else
1571 q_coffd = q_pwr >> 7; 1808 q_coffd = q_pwr >> 7;
1572 1809
1573 /* protect against divide by 0 and loss of sign bits */ 1810 /* In case i_coffd became zero, cancel calibration
1811 * not only it's too small, it'll also result a divide
1812 * by zero later on. */
1574 if (i_coffd == 0 || q_coffd < 2) 1813 if (i_coffd == 0 || q_coffd < 2)
1575 return 0; 1814 return -ECANCELED;
1815
1816 /* Protect against loss of sign bits */
1576 1817
1577 i_coff = (-iq_corr) / i_coffd; 1818 i_coff = (-iq_corr) / i_coffd;
1578 i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */ 1819 i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
@@ -1601,10 +1842,17 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
1601 return 0; 1842 return 0;
1602} 1843}
1603 1844
1604/* 1845/**
1605 * Perform a PHY calibration 1846 * ath5k_hw_phy_calibrate() - Perform a PHY calibration
1847 * @ah: The &struct ath5k_hw
1848 * @channel: The &struct ieee80211_channel
1849 *
1850 * The main function we call from above to perform
1851 * a short or full PHY calibration based on RF chip
1852 * and current channel
1606 */ 1853 */
1607int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, 1854int
1855ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
1608 struct ieee80211_channel *channel) 1856 struct ieee80211_channel *channel)
1609{ 1857{
1610 int ret; 1858 int ret;
@@ -1613,10 +1861,43 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
1613 return ath5k_hw_rf5110_calibrate(ah, channel); 1861 return ath5k_hw_rf5110_calibrate(ah, channel);
1614 1862
1615 ret = ath5k_hw_rf511x_iq_calibrate(ah); 1863 ret = ath5k_hw_rf511x_iq_calibrate(ah);
1864 if (ret) {
1865 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
1866 "No I/Q correction performed (%uMHz)\n",
1867 channel->center_freq);
1868
1869 /* Happens all the time if there is not much
1870 * traffic, consider it normal behaviour. */
1871 ret = 0;
1872 }
1873
1874 /* On full calibration do an AGC calibration and
1875 * request a PAPD probe for gainf calibration if
1876 * needed */
1877 if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
1616 1878
1617 if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) && 1879 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1618 (channel->hw_value != AR5K_MODE_11B)) 1880 AR5K_PHY_AGCCTL_CAL);
1619 ath5k_hw_request_rfgain_probe(ah); 1881
1882 ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
1883 AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF,
1884 0, false);
1885 if (ret) {
1886 ATH5K_ERR(ah,
1887 "gain calibration timeout (%uMHz)\n",
1888 channel->center_freq);
1889 }
1890
1891 if ((ah->ah_radio == AR5K_RF5111 ||
1892 ah->ah_radio == AR5K_RF5112)
1893 && (channel->hw_value != AR5K_MODE_11B))
1894 ath5k_hw_request_rfgain_probe(ah);
1895 }
1896
1897 /* Update noise floor
1898 * XXX: Only do this after AGC calibration */
1899 if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
1900 ath5k_hw_update_noise_floor(ah);
1620 1901
1621 return ret; 1902 return ret;
1622} 1903}
@@ -1626,6 +1907,16 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
1626* Spur mitigation functions * 1907* Spur mitigation functions *
1627\***************************/ 1908\***************************/
1628 1909
1910/**
1911 * ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter
1912 * @ah: The &struct ath5k_hw
1913 * @channel: The &struct ieee80211_channel
1914 *
1915 * This function gets called during PHY initialization to
1916 * configure the spur filter for the given channel. Spur is noise
1917 * generated due to "reflection" effects, for more information on this
1918 * method check out patent US7643810
1919 */
1629static void 1920static void
1630ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, 1921ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1631 struct ieee80211_channel *channel) 1922 struct ieee80211_channel *channel)
@@ -1865,15 +2156,73 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1865* Antenna control * 2156* Antenna control *
1866\*****************/ 2157\*****************/
1867 2158
1868static void /*TODO:Boundary check*/ 2159/**
2160 * DOC: Antenna control
2161 *
2162 * Hw supports up to 14 antennas ! I haven't found any card that implements
2163 * that. The maximum number of antennas I've seen is up to 4 (2 for 2GHz and 2
2164 * for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX)
2165 * omnidirectional or sectorial and antennas 3-14 sectorial (or directional).
2166 *
2167 * We can have a single antenna for RX and multiple antennas for TX.
2168 * RX antenna is our "default" antenna (usually antenna 1) set on
2169 * DEFAULT_ANTENNA register and TX antenna is set on each TX control descriptor
2170 * (0 for automatic selection, 1 - 14 antenna number).
2171 *
2172 * We can let hw do all the work doing fast antenna diversity for both
2173 * tx and rx or we can do things manually. Here are the options we have
2174 * (all are bits of STA_ID1 register):
2175 *
2176 * AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX
2177 * control descriptor, use the default antenna to transmit or else use the last
2178 * antenna on which we received an ACK.
2179 *
2180 * AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to
2181 * the antenna on which we got the ACK for that frame.
2182 *
2183 * AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the
2184 * one on the TX descriptor.
2185 *
2186 * AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames
2187 * (ACKs etc), or else use current antenna (the one we just used for TX).
2188 *
2189 * Using the above we support the following scenarios:
2190 *
2191 * AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically
2192 *
2193 * AR5K_ANTMODE_FIXED_A -> Only antenna A (MAIN) is present
2194 *
2195 * AR5K_ANTMODE_FIXED_B -> Only antenna B (AUX) is present
2196 *
2197 * AR5K_ANTMODE_SINGLE_AP -> Sta locked on a single ap
2198 *
2199 * AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc
2200 *
2201 * AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc
2202 *
2203 * AR5K_ANTMODE_DEBUG Debug mode -A -> Rx, B-> Tx-
2204 *
2205 * Also note that when setting antenna to F on tx descriptor card inverts
2206 * current tx antenna.
2207 */
2208
2209/**
2210 * ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer
2211 * @ah: The &struct ath5k_hw
2212 * @ant: Antenna number
2213 */
2214static void
1869ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant) 2215ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
1870{ 2216{
1871 if (ah->ah_version != AR5K_AR5210) 2217 if (ah->ah_version != AR5K_AR5210)
1872 ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA); 2218 ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
1873} 2219}
1874 2220
1875/* 2221/**
1876 * Enable/disable fast rx antenna diversity 2222 * ath5k_hw_set_fast_div() - Enable/disable fast rx antenna diversity
2223 * @ah: The &struct ath5k_hw
2224 * @ee_mode: One of enum ath5k_driver_mode
2225 * @enable: True to enable, false to disable
1877 */ 2226 */
1878static void 2227static void
1879ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable) 2228ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
@@ -1913,6 +2262,14 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
1913 } 2262 }
1914} 2263}
1915 2264
2265/**
2266 * ath5k_hw_set_antenna_switch() - Set up antenna switch table
2267 * @ah: The &struct ath5k_hw
2268 * @ee_mode: One of enum ath5k_driver_mode
2269 *
2270 * Switch table comes from EEPROM and includes information on controlling
2271 * the 2 antenna RX attenuators
2272 */
1916void 2273void
1917ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode) 2274ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
1918{ 2275{
@@ -1944,8 +2301,10 @@ ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
1944 AR5K_PHY_ANT_SWITCH_TABLE_1); 2301 AR5K_PHY_ANT_SWITCH_TABLE_1);
1945} 2302}
1946 2303
1947/* 2304/**
1948 * Set antenna operating mode 2305 * ath5k_hw_set_antenna_mode() - Set antenna operating mode
2306 * @ah: The &struct ath5k_hw
2307 * @ant_mode: One of enum ath5k_ant_mode
1949 */ 2308 */
1950void 2309void
1951ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode) 2310ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
@@ -2068,8 +2427,13 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
2068 * Helper functions 2427 * Helper functions
2069 */ 2428 */
2070 2429
2071/* 2430/**
2072 * Do linear interpolation between two given (x, y) points 2431 * ath5k_get_interpolated_value() - Get interpolated Y val between two points
2432 * @target: X value of the middle point
2433 * @x_left: X value of the left point
2434 * @x_right: X value of the right point
2435 * @y_left: Y value of the left point
2436 * @y_right: Y value of the right point
2073 */ 2437 */
2074static s16 2438static s16
2075ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right, 2439ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
@@ -2096,13 +2460,18 @@ ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
2096 return result; 2460 return result;
2097} 2461}
2098 2462
2099/* 2463/**
2100 * Find vertical boundary (min pwr) for the linear PCDAC curve. 2464 * ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the
2465 * linear PCDAC curve
2466 * @stepL: Left array with y values (pcdac steps)
2467 * @stepR: Right array with y values (pcdac steps)
2468 * @pwrL: Left array with x values (power steps)
2469 * @pwrR: Right array with x values (power steps)
2101 * 2470 *
2102 * Since we have the top of the curve and we draw the line below 2471 * Since we have the top of the curve and we draw the line below
2103 * until we reach 1 (1 pcdac step) we need to know which point 2472 * until we reach 1 (1 pcdac step) we need to know which point
2104 * (x value) that is so that we don't go below y axis and have negative 2473 * (x value) that is so that we don't go below x axis and have negative
2105 * pcdac values when creating the curve, or fill the table with zeroes. 2474 * pcdac values when creating the curve, or fill the table with zeros.
2106 */ 2475 */
2107static s16 2476static s16
2108ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR, 2477ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
@@ -2148,7 +2517,16 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
2148 return max(min_pwrL, min_pwrR); 2517 return max(min_pwrL, min_pwrR);
2149} 2518}
2150 2519
2151/* 2520/**
2521 * ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve
2522 * @pmin: Minimum power value (xmin)
2523 * @pmax: Maximum power value (xmax)
2524 * @pwr: Array of power steps (x values)
2525 * @vpd: Array of matching PCDAC/PDADC steps (y values)
2526 * @num_points: Number of provided points
2527 * @vpd_table: Array to fill with the full PCDAC/PDADC values (y values)
2528 * @type: One of enum ath5k_powertable_type (eeprom.h)
2529 *
2152 * Interpolate (pwr,vpd) points to create a Power to PDADC or a 2530 * Interpolate (pwr,vpd) points to create a Power to PDADC or a
2153 * Power to PCDAC curve. 2531 * Power to PCDAC curve.
2154 * 2532 *
@@ -2206,7 +2584,14 @@ ath5k_create_power_curve(s16 pmin, s16 pmax,
2206 } 2584 }
2207} 2585}
2208 2586
2209/* 2587/**
2588 * ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers
2589 * for a given channel.
2590 * @ah: The &struct ath5k_hw
2591 * @channel: The &struct ieee80211_channel
2592 * @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier
2593 * @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier
2594 *
2210 * Get the surrounding per-channel power calibration piers 2595 * Get the surrounding per-channel power calibration piers
2211 * for a given frequency so that we can interpolate between 2596 * for a given frequency so that we can interpolate between
2212 * them and come up with an appropriate dataset for our current 2597 * them and come up with an appropriate dataset for our current
@@ -2289,11 +2674,17 @@ done:
2289 *pcinfo_r = &pcinfo[idx_r]; 2674 *pcinfo_r = &pcinfo[idx_r];
2290} 2675}
2291 2676
2292/* 2677/**
2678 * ath5k_get_rate_pcal_data() - Get the interpolated per-rate power
2679 * calibration data
2680 * @ah: The &struct ath5k_hw *ah,
2681 * @channel: The &struct ieee80211_channel
2682 * @rates: The &struct ath5k_rate_pcal_info to fill
2683 *
2293 * Get the surrounding per-rate power calibration data 2684 * Get the surrounding per-rate power calibration data
2294 * for a given frequency and interpolate between power 2685 * for a given frequency and interpolate between power
2295 * values to set max target power supported by hw for 2686 * values to set max target power supported by hw for
2296 * each rate. 2687 * each rate on this frequency.
2297 */ 2688 */
2298static void 2689static void
2299ath5k_get_rate_pcal_data(struct ath5k_hw *ah, 2690ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
@@ -2381,7 +2772,11 @@ done:
2381 rpinfo[idx_r].target_power_54); 2772 rpinfo[idx_r].target_power_54);
2382} 2773}
2383 2774
2384/* 2775/**
2776 * ath5k_get_max_ctl_power() - Get max edge power for a given frequency
2777 * @ah: the &struct ath5k_hw
2778 * @channel: The &struct ieee80211_channel
2779 *
2385 * Get the max edge power for this channel if 2780 * Get the max edge power for this channel if
2386 * we have such data from EEPROM's Conformance Test 2781 * we have such data from EEPROM's Conformance Test
2387 * Limits (CTL), and limit max power if needed. 2782 * Limits (CTL), and limit max power if needed.
@@ -2461,8 +2856,39 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
2461 * Power to PCDAC table functions 2856 * Power to PCDAC table functions
2462 */ 2857 */
2463 2858
2464/* 2859/**
2465 * Fill Power to PCDAC table on RF5111 2860 * DOC: Power to PCDAC table functions
2861 *
2862 * For RF5111 we have an XPD -eXternal Power Detector- curve
2863 * for each calibrated channel. Each curve has 0,5dB Power steps
2864 * on x axis and PCDAC steps (offsets) on y axis and looks like an
2865 * exponential function. To recreate the curve we read 11 points
2866 * from eeprom (eeprom.c) and interpolate here.
2867 *
2868 * For RF5112 we have 4 XPD -eXternal Power Detector- curves
2869 * for each calibrated channel on 0, -6, -12 and -18dBm but we only
2870 * use the higher (3) and the lower (0) curves. Each curve again has 0.5dB
2871 * power steps on x axis and PCDAC steps on y axis and looks like a
2872 * linear function. To recreate the curve and pass the power values
2873 * on hw, we get 4 points for xpd 0 (lower gain -> max power)
2874 * and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c)
2875 * and interpolate here.
2876 *
2877 * For a given channel we get the calibrated points (piers) for it or
2878 * -if we don't have calibration data for this specific channel- from the
2879 * available surrounding channels we have calibration data for, after we do a
2880 * linear interpolation between them. Then since we have our calibrated points
2881 * for this channel, we do again a linear interpolation between them to get the
2882 * whole curve.
2883 *
2884 * We finally write the Y values of the curve(s) (the PCDAC values) on hw
2885 */
2886
2887/**
2888 * ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111
2889 * @ah: The &struct ath5k_hw
2890 * @table_min: Minimum power (x min)
2891 * @table_max: Maximum power (x max)
2466 * 2892 *
2467 * No further processing is needed for RF5111, the only thing we have to 2893 * No further processing is needed for RF5111, the only thing we have to
2468 * do is fill the values below and above calibration range since eeprom data 2894 * do is fill the values below and above calibration range since eeprom data
@@ -2503,10 +2929,14 @@ ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
2503 2929
2504} 2930}
2505 2931
2506/* 2932/**
2507 * Combine available XPD Curves and fill Linear Power to PCDAC table 2933 * ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves
2508 * on RF5112 2934 * @ah: The &struct ath5k_hw
2935 * @table_min: Minimum power (x min)
2936 * @table_max: Maximum power (x max)
2937 * @pdcurves: Number of pd curves
2509 * 2938 *
2939 * Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112
2510 * RFX112 can have up to 2 curves (one for low txpower range and one for 2940 * RFX112 can have up to 2 curves (one for low txpower range and one for
2511 * higher txpower range). We need to put them both on pcdac_out and place 2941 * higher txpower range). We need to put them both on pcdac_out and place
2512 * them in the correct location. In case we only have one curve available 2942 * them in the correct location. In case we only have one curve available
@@ -2608,7 +3038,10 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
2608 } 3038 }
2609} 3039}
2610 3040
2611/* Write PCDAC values on hw */ 3041/**
3042 * ath5k_write_pcdac_table() - Write the PCDAC values on hw
3043 * @ah: The &struct ath5k_hw
3044 */
2612static void 3045static void
2613ath5k_write_pcdac_table(struct ath5k_hw *ah) 3046ath5k_write_pcdac_table(struct ath5k_hw *ah)
2614{ 3047{
@@ -2631,9 +3064,32 @@ ath5k_write_pcdac_table(struct ath5k_hw *ah)
2631 * Power to PDADC table functions 3064 * Power to PDADC table functions
2632 */ 3065 */
2633 3066
2634/* 3067/**
2635 * Set the gain boundaries and create final Power to PDADC table 3068 * DOC: Power to PDADC table functions
3069 *
3070 * For RF2413 and later we have a Power to PDADC table (Power Detector)
3071 * instead of a PCDAC (Power Control) and 4 pd gain curves for each
3072 * calibrated channel. Each curve has power on x axis in 0.5 db steps and
3073 * PDADC steps on y axis and looks like an exponential function like the
3074 * RF5111 curve.
3075 *
3076 * To recreate the curves we read the points from eeprom (eeprom.c)
3077 * and interpolate here. Note that in most cases only 2 (higher and lower)
3078 * curves are used (like RF5112) but vendors have the opportunity to include
3079 * all 4 curves on eeprom. The final curve (higher power) has an extra
3080 * point for better accuracy like RF5112.
2636 * 3081 *
3082 * The process is similar to what we do above for RF5111/5112
3083 */
3084
3085/**
3086 * ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves
3087 * @ah: The &struct ath5k_hw
3088 * @pwr_min: Minimum power (x min)
3089 * @pwr_max: Maximum power (x max)
3090 * @pdcurves: Number of available curves
3091 *
3092 * Combine the various pd curves and create the final Power to PDADC table
2637 * We can have up to 4 pd curves, we need to do a similar process 3093 * We can have up to 4 pd curves, we need to do a similar process
2638 * as we do for RF5112. This time we don't have an edge_flag but we 3094 * as we do for RF5112. This time we don't have an edge_flag but we
2639 * set the gain boundaries on a separate register. 3095 * set the gain boundaries on a separate register.
@@ -2757,7 +3213,11 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
2757 3213
2758} 3214}
2759 3215
2760/* Write PDADC values on hw */ 3216/**
3217 * ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw
3218 * @ah: The &struct ath5k_hw
3219 * @ee_mode: One of enum ath5k_driver_mode
3220 */
2761static void 3221static void
2762ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode) 3222ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
2763{ 3223{
@@ -2814,7 +3274,13 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
2814 * Common code for PCDAC/PDADC tables 3274 * Common code for PCDAC/PDADC tables
2815 */ 3275 */
2816 3276
2817/* 3277/**
3278 * ath5k_setup_channel_powertable() - Set up power table for this channel
3279 * @ah: The &struct ath5k_hw
3280 * @channel: The &struct ieee80211_channel
3281 * @ee_mode: One of enum ath5k_driver_mode
3282 * @type: One of enum ath5k_powertable_type (eeprom.h)
3283 *
2818 * This is the main function that uses all of the above 3284 * This is the main function that uses all of the above
2819 * to set PCDAC/PDADC table on hw for the current channel. 3285 * to set PCDAC/PDADC table on hw for the current channel.
2820 * This table is used for tx power calibration on the baseband, 3286 * This table is used for tx power calibration on the baseband,
@@ -3012,7 +3478,12 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
3012 return 0; 3478 return 0;
3013} 3479}
3014 3480
3015/* Write power table for current channel to hw */ 3481/**
3482 * ath5k_write_channel_powertable() - Set power table for current channel on hw
3483 * @ah: The &struct ath5k_hw
3484 * @ee_mode: One of enum ath5k_driver_mode
3485 * @type: One of enum ath5k_powertable_type (eeprom.h)
3486 */
3016static void 3487static void
3017ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type) 3488ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
3018{ 3489{
@@ -3022,28 +3493,36 @@ ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
3022 ath5k_write_pcdac_table(ah); 3493 ath5k_write_pcdac_table(ah);
3023} 3494}
3024 3495
3025/* 3496
3026 * Per-rate tx power setting 3497/**
3498 * DOC: Per-rate tx power setting
3027 * 3499 *
3028 * This is the code that sets the desired tx power (below 3500 * This is the code that sets the desired tx power limit (below
3029 * maximum) on hw for each rate (we also have TPC that sets 3501 * maximum) on hw for each rate (we also have TPC that sets
3030 * power per packet). We do that by providing an index on the 3502 * power per packet type). We do that by providing an index on the
3031 * PCDAC/PDADC table we set up. 3503 * PCDAC/PDADC table we set up above, for each rate.
3032 */
3033
3034/*
3035 * Set rate power table
3036 * 3504 *
3037 * For now we only limit txpower based on maximum tx power 3505 * For now we only limit txpower based on maximum tx power
3038 * supported by hw (what's inside rate_info). We need to limit 3506 * supported by hw (what's inside rate_info) + conformance test
3039 * this even more, based on regulatory domain etc. 3507 * limits. We need to limit this even more, based on regulatory domain
3508 * etc to be safe. Normally this is done from above so we don't care
3509 * here, all we care is that the tx power we set will be O.K.
3510 * for the hw (e.g. won't create noise on PA etc).
3040 * 3511 *
3041 * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps) 3512 * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps -
3042 * and is indexed as follows: 3513 * x values) and is indexed as follows:
3043 * rates[0] - rates[7] -> OFDM rates 3514 * rates[0] - rates[7] -> OFDM rates
3044 * rates[8] - rates[14] -> CCK rates 3515 * rates[8] - rates[14] -> CCK rates
3045 * rates[15] -> XR rates (they all have the same power) 3516 * rates[15] -> XR rates (they all have the same power)
3046 */ 3517 */
3518
3519/**
3520 * ath5k_setup_rate_powertable() - Set up rate power table for a given tx power
3521 * @ah: The &struct ath5k_hw
3522 * @max_pwr: The maximum tx power requested in 0.5dB steps
3523 * @rate_info: The &struct ath5k_rate_pcal_info to fill
3524 * @ee_mode: One of enum ath5k_driver_mode
3525 */
3047static void 3526static void
3048ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr, 3527ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
3049 struct ath5k_rate_pcal_info *rate_info, 3528 struct ath5k_rate_pcal_info *rate_info,
@@ -3114,8 +3593,14 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
3114} 3593}
3115 3594
3116 3595
3117/* 3596/**
3118 * Set transmission power 3597 * ath5k_hw_txpower() - Set transmission power limit for a given channel
3598 * @ah: The &struct ath5k_hw
3599 * @channel: The &struct ieee80211_channel
3600 * @txpower: Requested tx power in 0.5dB steps
3601 *
3602 * Combines all of the above to set the requested tx power limit
3603 * on hw.
3119 */ 3604 */
3120static int 3605static int
3121ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, 3606ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
@@ -3233,7 +3718,16 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3233 return 0; 3718 return 0;
3234} 3719}
3235 3720
3236int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower) 3721/**
3722 * ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel
3723 * @ah: The &struct ath5k_hw
3724 * @txpower: The requested tx power limit in 0.5dB steps
3725 *
3726 * This function provides access to ath5k_hw_txpower to the driver in
3727 * case user or an application changes it while PHY is running.
3728 */
3729int
3730ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
3237{ 3731{
3238 ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER, 3732 ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
3239 "changing txpower to %d\n", txpower); 3733 "changing txpower to %d\n", txpower);
@@ -3241,11 +3735,26 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
3241 return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower); 3735 return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
3242} 3736}
3243 3737
3738
3244/*************\ 3739/*************\
3245 Init function 3740 Init function
3246\*************/ 3741\*************/
3247 3742
3248int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, 3743/**
3744 * ath5k_hw_phy_init() - Initialize PHY
3745 * @ah: The &struct ath5k_hw
3746 * @channel: The @struct ieee80211_channel
3747 * @mode: One of enum ath5k_driver_mode
3748 * @fast: Try a fast channel switch instead
3749 *
3750 * This is the main function used during reset to initialize PHY
3751 * or do a fast channel change if possible.
3752 *
3753 * NOTE: Do not call this one from the driver, it assumes PHY is in a
3754 * warm reset state !
3755 */
3756int
3757ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3249 u8 mode, bool fast) 3758 u8 mode, bool fast)
3250{ 3759{
3251 struct ieee80211_channel *curr_channel; 3760 struct ieee80211_channel *curr_channel;
@@ -3355,7 +3864,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3355 if (ret) 3864 if (ret)
3356 return ret; 3865 return ret;
3357 3866
3358 mdelay(1); 3867 usleep_range(1000, 1500);
3359 3868
3360 /* 3869 /*
3361 * Write RF buffer 3870 * Write RF buffer
@@ -3376,10 +3885,10 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3376 } 3885 }
3377 3886
3378 } else if (ah->ah_version == AR5K_AR5210) { 3887 } else if (ah->ah_version == AR5K_AR5210) {
3379 mdelay(1); 3888 usleep_range(1000, 1500);
3380 /* Disable phy and wait */ 3889 /* Disable phy and wait */
3381 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT); 3890 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
3382 mdelay(1); 3891 usleep_range(1000, 1500);
3383 } 3892 }
3384 3893
3385 /* Set channel on PHY */ 3894 /* Set channel on PHY */
@@ -3405,7 +3914,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3405 for (i = 0; i <= 20; i++) { 3914 for (i = 0; i <= 20; i++) {
3406 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) 3915 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
3407 break; 3916 break;
3408 udelay(200); 3917 usleep_range(200, 250);
3409 } 3918 }
3410 ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); 3919 ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
3411 3920
@@ -3433,9 +3942,9 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3433 3942
3434 /* At the same time start I/Q calibration for QAM constellation 3943 /* At the same time start I/Q calibration for QAM constellation
3435 * -no need for CCK- */ 3944 * -no need for CCK- */
3436 ah->ah_calibration = false; 3945 ah->ah_iq_cal_needed = false;
3437 if (!(mode == AR5K_MODE_11B)) { 3946 if (!(mode == AR5K_MODE_11B)) {
3438 ah->ah_calibration = true; 3947 ah->ah_iq_cal_needed = true;
3439 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, 3948 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
3440 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15); 3949 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
3441 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, 3950 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 776654228eaa..30b50f934172 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -17,23 +17,48 @@
17 */ 17 */
18 18
19/********************************************\ 19/********************************************\
20Queue Control Unit, DFS Control Unit Functions 20Queue Control Unit, DCF Control Unit Functions
21\********************************************/ 21\********************************************/
22 22
23#include "ath5k.h" 23#include "ath5k.h"
24#include "reg.h" 24#include "reg.h"
25#include "debug.h" 25#include "debug.h"
26#include <linux/log2.h>
27
28/**
29 * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
30 *
31 * Here we setup parameters for the 12 available TX queues. Note that
32 * on the various registers we can usually only map the first 10 of them so
33 * basically we have 10 queues to play with. Each queue has a matching
34 * QCU that controls when the queue will get triggered and multiple QCUs
35 * can be mapped to a single DCU that controls the various DFS parameters
36 * for the various queues. In our setup we have a 1:1 mapping between QCUs
37 * and DCUs allowing us to have different DFS settings for each queue.
38 *
39 * When a frame goes into a TX queue, QCU decides when it'll trigger a
40 * transmission based on various criteria (such as how many data we have inside
41 * it's buffer or -if it's a beacon queue- if it's time to fire up the queue
42 * based on TSF etc), DCU adds backoff, IFSes etc and then a scheduler
43 * (arbitrator) decides the priority of each QCU based on it's configuration
44 * (e.g. beacons are always transmitted when they leave DCU bypassing all other
45 * frames from other queues waiting to be transmitted). After a frame leaves
46 * the DCU it goes to PCU for further processing and then to PHY for
47 * the actual transmission.
48 */
26 49
27 50
28/******************\ 51/******************\
29* Helper functions * 52* Helper functions *
30\******************/ 53\******************/
31 54
32/* 55/**
33 * Get number of pending frames 56 * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
34 * for a specific queue [5211+] 57 * @ah: The &struct ath5k_hw
58 * @queue: One of enum ath5k_tx_queue_id
35 */ 59 */
36u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) 60u32
61ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
37{ 62{
38 u32 pending; 63 u32 pending;
39 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 64 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
@@ -58,10 +83,13 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
58 return pending; 83 return pending;
59} 84}
60 85
61/* 86/**
62 * Set a transmit queue inactive 87 * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
88 * @ah: The &struct ath5k_hw
89 * @queue: One of enum ath5k_tx_queue_id
63 */ 90 */
64void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) 91void
92ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
65{ 93{
66 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num)) 94 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
67 return; 95 return;
@@ -72,34 +100,56 @@ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
72 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue); 100 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
73} 101}
74 102
75/* 103/**
104 * ath5k_cw_validate() - Make sure the given cw is valid
105 * @cw_req: The contention window value to check
106 *
76 * Make sure cw is a power of 2 minus 1 and smaller than 1024 107 * Make sure cw is a power of 2 minus 1 and smaller than 1024
77 */ 108 */
78static u16 ath5k_cw_validate(u16 cw_req) 109static u16
110ath5k_cw_validate(u16 cw_req)
79{ 111{
80 u32 cw = 1;
81 cw_req = min(cw_req, (u16)1023); 112 cw_req = min(cw_req, (u16)1023);
82 113
83 while (cw < cw_req) 114 /* Check if cw_req + 1 a power of 2 */
84 cw = (cw << 1) | 1; 115 if (is_power_of_2(cw_req + 1))
116 return cw_req;
85 117
86 return cw; 118 /* Check if cw_req is a power of 2 */
119 if (is_power_of_2(cw_req))
120 return cw_req - 1;
121
122 /* If none of the above is correct
123 * find the closest power of 2 */
124 cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
125
126 return cw_req;
87} 127}
88 128
89/* 129/**
90 * Get properties for a transmit queue 130 * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
131 * @ah: The &struct ath5k_hw
132 * @queue: One of enum ath5k_tx_queue_id
133 * @queue_info: The &struct ath5k_txq_info to fill
91 */ 134 */
92int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, 135int
136ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
93 struct ath5k_txq_info *queue_info) 137 struct ath5k_txq_info *queue_info)
94{ 138{
95 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info)); 139 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
96 return 0; 140 return 0;
97} 141}
98 142
99/* 143/**
100 * Set properties for a transmit queue 144 * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
145 * @ah: The &struct ath5k_hw
146 * @queue: One of enum ath5k_tx_queue_id
147 * @qinfo: The &struct ath5k_txq_info to use
148 *
149 * Returns 0 on success or -EIO if queue is inactive
101 */ 150 */
102int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, 151int
152ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
103 const struct ath5k_txq_info *qinfo) 153 const struct ath5k_txq_info *qinfo)
104{ 154{
105 struct ath5k_txq_info *qi; 155 struct ath5k_txq_info *qi;
@@ -139,10 +189,16 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
139 return 0; 189 return 0;
140} 190}
141 191
142/* 192/**
143 * Initialize a transmit queue 193 * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
194 * @ah: The &struct ath5k_hw
195 * @queue_type: One of enum ath5k_tx_queue
196 * @queue_info: The &struct ath5k_txq_info to use
197 *
198 * Returns 0 on success, -EINVAL on invalid arguments
144 */ 199 */
145int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, 200int
201ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
146 struct ath5k_txq_info *queue_info) 202 struct ath5k_txq_info *queue_info)
147{ 203{
148 unsigned int queue; 204 unsigned int queue;
@@ -217,10 +273,16 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
217* Single QCU/DCU initialization * 273* Single QCU/DCU initialization *
218\*******************************/ 274\*******************************/
219 275
220/* 276/**
221 * Set tx retry limits on DCU 277 * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
278 * @ah: The &struct ath5k_hw
279 * @queue: One of enum ath5k_tx_queue_id
280 *
281 * This function is used when initializing a queue, to set
282 * retry limits based on ah->ah_retry_* and the chipset used.
222 */ 283 */
223void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, 284void
285ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
224 unsigned int queue) 286 unsigned int queue)
225{ 287{
226 /* Single data queue on AR5210 */ 288 /* Single data queue on AR5210 */
@@ -255,15 +317,15 @@ void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
255} 317}
256 318
257/** 319/**
258 * ath5k_hw_reset_tx_queue - Initialize a single hw queue 320 * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
321 * @ah: The &struct ath5k_hw
322 * @queue: One of enum ath5k_tx_queue_id
259 * 323 *
260 * @ah The &struct ath5k_hw 324 * Set DCF properties for the given transmit queue on DCU
261 * @queue The hw queue number
262 *
263 * Set DFS properties for the given transmit queue on DCU
264 * and configures all queue-specific parameters. 325 * and configures all queue-specific parameters.
265 */ 326 */
266int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) 327int
328ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
267{ 329{
268 struct ath5k_txq_info *tq = &ah->ah_txq[queue]; 330 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
269 331
@@ -491,10 +553,9 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
491\**************************/ 553\**************************/
492 554
493/** 555/**
494 * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU 556 * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
495 * 557 * @ah: The &struct ath5k_hw
496 * @ah The &struct ath5k_hw 558 * @slot_time: Slot time in us
497 * @slot_time Slot time in us
498 * 559 *
499 * Sets the global IFS intervals on DCU (also works on AR5210) for 560 * Sets the global IFS intervals on DCU (also works on AR5210) for
500 * the given slot time and the current bwmode. 561 * the given slot time and the current bwmode.
@@ -597,7 +658,15 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
597} 658}
598 659
599 660
600int ath5k_hw_init_queues(struct ath5k_hw *ah) 661/**
662 * ath5k_hw_init_queues() - Initialize tx queues
663 * @ah: The &struct ath5k_hw
664 *
665 * Initializes all tx queues based on information on
666 * ah->ah_txq* set by the driver
667 */
668int
669ath5k_hw_init_queues(struct ath5k_hw *ah)
601{ 670{
602 int i, ret; 671 int i, ret;
603 672
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index f5c1000045d3..0ea1608b47fd 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -280,6 +280,10 @@
280 * 5211/5212 we have one primary and 4 secondary registers. 280 * 5211/5212 we have one primary and 4 secondary registers.
281 * So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212. 281 * So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212.
282 * Most of these bits are common for all chipsets. 282 * Most of these bits are common for all chipsets.
283 *
284 * NOTE: On 5211+ TXOK, TXDESC, TXERR, TXEOL and TXURN contain
285 * the logical OR from per-queue interrupt bits found on SISR registers
286 * (see below).
283 */ 287 */
284#define AR5K_ISR 0x001c /* Register Address [5210] */ 288#define AR5K_ISR 0x001c /* Register Address [5210] */
285#define AR5K_PISR 0x0080 /* Register Address [5211+] */ 289#define AR5K_PISR 0x0080 /* Register Address [5211+] */
@@ -292,7 +296,10 @@
292#define AR5K_ISR_TXOK 0x00000040 /* Frame successfully transmitted */ 296#define AR5K_ISR_TXOK 0x00000040 /* Frame successfully transmitted */
293#define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */ 297#define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */
294#define AR5K_ISR_TXERR 0x00000100 /* Transmit error */ 298#define AR5K_ISR_TXERR 0x00000100 /* Transmit error */
295#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout) */ 299#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout)
300 * NOTE: We don't have per-queue info for this
301 * one, but we can enable it per-queue through
302 * TXNOFRM_QCU field on TXNOFRM register */
296#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */ 303#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */
297#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */ 304#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */
298#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */ 305#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */
@@ -302,21 +309,29 @@
302#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */ 309#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */
303#define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */ 310#define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
304#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */ 311#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
305#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */ 312#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+]
313 * 'or' of MCABT, SSERR, DPERR from SISR2 */
306#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */ 314#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
307#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */ 315#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
308#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */ 316#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
309#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */ 317#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
310#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */ 318#define AR5K_ISR_DPERR 0x00400000 /* Bus parity error [5210] */
311#define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */ 319#define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */
312#define AR5K_ISR_TIM 0x00800000 /* [5211+] */ 320#define AR5K_ISR_TIM 0x00800000 /* [5211+] */
313#define AR5K_ISR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT, 321#define AR5K_ISR_BCNMISC 0x00800000 /* Misc beacon related interrupt
314 CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */ 322 * 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
323 * CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
315#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */ 324#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */
316#define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */ 325#define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */
317#define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */ 326#define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */
318#define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */ 327#define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */
319 328
329#define AR5K_ISR_BITS_FROM_SISRS (AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\
330 AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\
331 AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\
332 AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\
333 AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG)
334
320/* 335/*
321 * Secondary status registers [5211+] (0 - 4) 336 * Secondary status registers [5211+] (0 - 4)
322 * 337 *
@@ -347,7 +362,7 @@
347#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */ 362#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
348#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */ 363#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
349#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */ 364#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */
350#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF OOR (?) */ 365#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF Out of range */
351 366
352#define AR5K_SISR3 0x0090 /* Register Address [5211+] */ 367#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
353#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */ 368#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 2abac257b4b4..4aed3a3ab109 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,9 +19,9 @@
19 * 19 *
20 */ 20 */
21 21
22/*****************************\ 22/****************************\
23 Reset functions and helpers 23 Reset function and helpers
24\*****************************/ 24\****************************/
25 25
26#include <asm/unaligned.h> 26#include <asm/unaligned.h>
27 27
@@ -33,14 +33,36 @@
33#include "debug.h" 33#include "debug.h"
34 34
35 35
36/**
37 * DOC: Reset function and helpers
38 *
39 * Here we implement the main reset routine, used to bring the card
40 * to a working state and ready to receive. We also handle routines
41 * that don't fit on other places such as clock, sleep and power control
42 */
43
44
36/******************\ 45/******************\
37* Helper functions * 46* Helper functions *
38\******************/ 47\******************/
39 48
40/* 49/**
41 * Check if a register write has been completed 50 * ath5k_hw_register_timeout() - Poll a register for a flag/field change
51 * @ah: The &struct ath5k_hw
52 * @reg: The register to read
53 * @flag: The flag/field to check on the register
54 * @val: The field value we expect (if we check a field)
55 * @is_set: Instead of checking if the flag got cleared, check if it got set
56 *
57 * Some registers contain flags that indicate that an operation is
58 * running. We use this function to poll these registers and check
59 * if these flags get cleared. We also use it to poll a register
60 * field (containing multiple flags) until it gets a specific value.
61 *
62 * Returns -EAGAIN if we exceeded AR5K_TUNE_REGISTER_TIMEOUT * 15us or 0
42 */ 63 */
43int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val, 64int
65ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
44 bool is_set) 66 bool is_set)
45{ 67{
46 int i; 68 int i;
@@ -64,35 +86,48 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
64\*************************/ 86\*************************/
65 87
66/** 88/**
67 * ath5k_hw_htoclock - Translate usec to hw clock units 89 * ath5k_hw_htoclock() - Translate usec to hw clock units
68 *
69 * @ah: The &struct ath5k_hw 90 * @ah: The &struct ath5k_hw
70 * @usec: value in microseconds 91 * @usec: value in microseconds
92 *
93 * Translate usecs to hw clock units based on the current
94 * hw clock rate.
95 *
96 * Returns number of clock units
71 */ 97 */
72unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec) 98unsigned int
99ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
73{ 100{
74 struct ath_common *common = ath5k_hw_common(ah); 101 struct ath_common *common = ath5k_hw_common(ah);
75 return usec * common->clockrate; 102 return usec * common->clockrate;
76} 103}
77 104
78/** 105/**
79 * ath5k_hw_clocktoh - Translate hw clock units to usec 106 * ath5k_hw_clocktoh() - Translate hw clock units to usec
107 * @ah: The &struct ath5k_hw
80 * @clock: value in hw clock units 108 * @clock: value in hw clock units
109 *
110 * Translate hw clock units to usecs based on the current
111 * hw clock rate.
112 *
113 * Returns number of usecs
81 */ 114 */
82unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock) 115unsigned int
116ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
83{ 117{
84 struct ath_common *common = ath5k_hw_common(ah); 118 struct ath_common *common = ath5k_hw_common(ah);
85 return clock / common->clockrate; 119 return clock / common->clockrate;
86} 120}
87 121
88/** 122/**
89 * ath5k_hw_init_core_clock - Initialize core clock 123 * ath5k_hw_init_core_clock() - Initialize core clock
90 * 124 * @ah: The &struct ath5k_hw
91 * @ah The &struct ath5k_hw
92 * 125 *
93 * Initialize core clock parameters (usec, usec32, latencies etc). 126 * Initialize core clock parameters (usec, usec32, latencies etc),
127 * based on current bwmode and chipset properties.
94 */ 128 */
95static void ath5k_hw_init_core_clock(struct ath5k_hw *ah) 129static void
130ath5k_hw_init_core_clock(struct ath5k_hw *ah)
96{ 131{
97 struct ieee80211_channel *channel = ah->ah_current_channel; 132 struct ieee80211_channel *channel = ah->ah_current_channel;
98 struct ath_common *common = ath5k_hw_common(ah); 133 struct ath_common *common = ath5k_hw_common(ah);
@@ -227,16 +262,21 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
227 } 262 }
228} 263}
229 264
230/* 265/**
266 * ath5k_hw_set_sleep_clock() - Setup sleep clock operation
267 * @ah: The &struct ath5k_hw
268 * @enable: Enable sleep clock operation (false to disable)
269 *
231 * If there is an external 32KHz crystal available, use it 270 * If there is an external 32KHz crystal available, use it
232 * as ref. clock instead of 32/40MHz clock and baseband clocks 271 * as ref. clock instead of 32/40MHz clock and baseband clocks
233 * to save power during sleep or restore normal 32/40MHz 272 * to save power during sleep or restore normal 32/40MHz
234 * operation. 273 * operation.
235 * 274 *
236 * XXX: When operating on 32KHz certain PHY registers (27 - 31, 275 * NOTE: When operating on 32KHz certain PHY registers (27 - 31,
237 * 123 - 127) require delay on access. 276 * 123 - 127) require delay on access.
238 */ 277 */
239static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable) 278static void
279ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
240{ 280{
241 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 281 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
242 u32 scal, spending, sclock; 282 u32 scal, spending, sclock;
@@ -340,10 +380,19 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
340* Reset/Sleep control * 380* Reset/Sleep control *
341\*********************/ 381\*********************/
342 382
343/* 383/**
344 * Reset chipset 384 * ath5k_hw_nic_reset() - Reset the various chipset units
385 * @ah: The &struct ath5k_hw
386 * @val: Mask to indicate what units to reset
387 *
388 * To reset the various chipset units we need to write
389 * the mask to AR5K_RESET_CTL and poll the register until
390 * all flags are cleared.
391 *
392 * Returns 0 if we are O.K. or -EAGAIN (from athk5_hw_register_timeout)
345 */ 393 */
346static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val) 394static int
395ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
347{ 396{
348 int ret; 397 int ret;
349 u32 mask = val ? val : ~0U; 398 u32 mask = val ? val : ~0U;
@@ -357,7 +406,7 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
357 ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL); 406 ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
358 407
359 /* Wait at least 128 PCI clocks */ 408 /* Wait at least 128 PCI clocks */
360 udelay(15); 409 usleep_range(15, 20);
361 410
362 if (ah->ah_version == AR5K_AR5210) { 411 if (ah->ah_version == AR5K_AR5210) {
363 val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA 412 val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
@@ -382,12 +431,17 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
382 return ret; 431 return ret;
383} 432}
384 433
385/* 434/**
386 * Reset AHB chipset 435 * ath5k_hw_wisoc_reset() - Reset AHB chipset
387 * AR5K_RESET_CTL_PCU flag resets WMAC 436 * @ah: The &struct ath5k_hw
388 * AR5K_RESET_CTL_BASEBAND flag resets WBB 437 * @flags: Mask to indicate what units to reset
438 *
439 * Same as ath5k_hw_nic_reset but for AHB based devices
440 *
441 * Returns 0 if we are O.K. or -EAGAIN (from athk5_hw_register_timeout)
389 */ 442 */
390static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) 443static int
444ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
391{ 445{
392 u32 mask = flags ? flags : ~0U; 446 u32 mask = flags ? flags : ~0U;
393 u32 __iomem *reg; 447 u32 __iomem *reg;
@@ -422,7 +476,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
422 regval = __raw_readl(reg); 476 regval = __raw_readl(reg);
423 __raw_writel(regval | val, reg); 477 __raw_writel(regval | val, reg);
424 regval = __raw_readl(reg); 478 regval = __raw_readl(reg);
425 udelay(100); 479 usleep_range(100, 150);
426 480
427 /* Bring BB/MAC out of reset */ 481 /* Bring BB/MAC out of reset */
428 __raw_writel(regval & ~val, reg); 482 __raw_writel(regval & ~val, reg);
@@ -439,11 +493,23 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
439 return 0; 493 return 0;
440} 494}
441 495
442 496/**
443/* 497 * ath5k_hw_set_power_mode() - Set power mode
444 * Sleep control 498 * @ah: The &struct ath5k_hw
499 * @mode: One of enum ath5k_power_mode
500 * @set_chip: Set to true to write sleep control register
501 * @sleep_duration: How much time the device is allowed to sleep
502 * when sleep logic is enabled (in 128 microsecond increments).
503 *
504 * This function is used to configure sleep policy and allowed
505 * sleep modes. For more information check out the sleep control
506 * register on reg.h and STA_ID1.
507 *
508 * Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid
509 * mode is requested.
445 */ 510 */
446static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, 511static int
512ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
447 bool set_chip, u16 sleep_duration) 513 bool set_chip, u16 sleep_duration)
448{ 514{
449 unsigned int i; 515 unsigned int i;
@@ -493,7 +559,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
493 559
494 ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE, 560 ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
495 AR5K_SLEEP_CTL); 561 AR5K_SLEEP_CTL);
496 udelay(15); 562 usleep_range(15, 20);
497 563
498 for (i = 200; i > 0; i--) { 564 for (i = 200; i > 0; i--) {
499 /* Check if the chip did wake up */ 565 /* Check if the chip did wake up */
@@ -502,7 +568,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
502 break; 568 break;
503 569
504 /* Wait a bit and retry */ 570 /* Wait a bit and retry */
505 udelay(50); 571 usleep_range(50, 75);
506 ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE, 572 ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
507 AR5K_SLEEP_CTL); 573 AR5K_SLEEP_CTL);
508 } 574 }
@@ -523,17 +589,20 @@ commit:
523 return 0; 589 return 0;
524} 590}
525 591
526/* 592/**
527 * Put device on hold 593 * ath5k_hw_on_hold() - Put device on hold
594 * @ah: The &struct ath5k_hw
528 * 595 *
529 * Put MAC and Baseband on warm reset and 596 * Put MAC and Baseband on warm reset and keep that state
530 * keep that state (don't clean sleep control 597 * (don't clean sleep control register). After this MAC
531 * register). After this MAC and Baseband are 598 * and Baseband are disabled and a full reset is needed
532 * disabled and a full reset is needed to come 599 * to come back. This way we save as much power as possible
533 * back. This way we save as much power as possible
534 * without putting the card on full sleep. 600 * without putting the card on full sleep.
601 *
602 * Returns 0 on success or -EIO on error
535 */ 603 */
536int ath5k_hw_on_hold(struct ath5k_hw *ah) 604int
605ath5k_hw_on_hold(struct ath5k_hw *ah)
537{ 606{
538 struct pci_dev *pdev = ah->pdev; 607 struct pci_dev *pdev = ah->pdev;
539 u32 bus_flags; 608 u32 bus_flags;
@@ -543,7 +612,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
543 return 0; 612 return 0;
544 613
545 /* Make sure device is awake */ 614 /* Make sure device is awake */
546 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); 615 ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
547 if (ret) { 616 if (ret) {
548 ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n"); 617 ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
549 return ret; 618 return ret;
@@ -563,7 +632,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
563 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | 632 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
564 AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | 633 AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
565 AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); 634 AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
566 mdelay(2); 635 usleep_range(2000, 2500);
567 } else { 636 } else {
568 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | 637 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
569 AR5K_RESET_CTL_BASEBAND | bus_flags); 638 AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -575,7 +644,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
575 } 644 }
576 645
577 /* ...wakeup again!*/ 646 /* ...wakeup again!*/
578 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); 647 ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
579 if (ret) { 648 if (ret) {
580 ATH5K_ERR(ah, "failed to put device on hold\n"); 649 ATH5K_ERR(ah, "failed to put device on hold\n");
581 return ret; 650 return ret;
@@ -584,11 +653,18 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
584 return ret; 653 return ret;
585} 654}
586 655
587/* 656/**
657 * ath5k_hw_nic_wakeup() - Force card out of sleep
658 * @ah: The &struct ath5k_hw
659 * @channel: The &struct ieee80211_channel
660 *
588 * Bring up MAC + PHY Chips and program PLL 661 * Bring up MAC + PHY Chips and program PLL
589 * Channel is NULL for the initial wakeup. 662 * NOTE: Channel is NULL for the initial wakeup.
663 *
664 * Returns 0 on success, -EIO on hw failure or -EINVAL for false channel infos
590 */ 665 */
591int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel) 666int
667ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
592{ 668{
593 struct pci_dev *pdev = ah->pdev; 669 struct pci_dev *pdev = ah->pdev;
594 u32 turbo, mode, clock, bus_flags; 670 u32 turbo, mode, clock, bus_flags;
@@ -600,7 +676,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
600 676
601 if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) { 677 if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
602 /* Wakeup the device */ 678 /* Wakeup the device */
603 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); 679 ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
604 if (ret) { 680 if (ret) {
605 ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n"); 681 ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
606 return ret; 682 return ret;
@@ -621,7 +697,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
621 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | 697 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
622 AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA | 698 AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
623 AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); 699 AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
624 mdelay(2); 700 usleep_range(2000, 2500);
625 } else { 701 } else {
626 if (ath5k_get_bus_type(ah) == ATH_AHB) 702 if (ath5k_get_bus_type(ah) == ATH_AHB)
627 ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU | 703 ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -637,7 +713,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
637 } 713 }
638 714
639 /* ...wakeup again!...*/ 715 /* ...wakeup again!...*/
640 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); 716 ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
641 if (ret) { 717 if (ret) {
642 ATH5K_ERR(ah, "failed to resume the MAC Chip\n"); 718 ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
643 return ret; 719 return ret;
@@ -739,7 +815,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
739 /* ...update PLL if needed */ 815 /* ...update PLL if needed */
740 if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) { 816 if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
741 ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL); 817 ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
742 udelay(300); 818 usleep_range(300, 350);
743 } 819 }
744 820
745 /* ...set the PHY operating mode */ 821 /* ...set the PHY operating mode */
@@ -755,8 +831,19 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
755* Post-initvals register modifications * 831* Post-initvals register modifications *
756\**************************************/ 832\**************************************/
757 833
758/* TODO: Half/Quarter rate */ 834/**
759static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, 835 * ath5k_hw_tweak_initval_settings() - Tweak initial settings
836 * @ah: The &struct ath5k_hw
837 * @channel: The &struct ieee80211_channel
838 *
839 * Some settings are not handled on initvals, e.g. bwmode
840 * settings, some phy settings, workarounds etc that in general
841 * don't fit anywhere else or are too small to introduce a separate
842 * function for each one. So we have this function to handle
843 * them all during reset and complete card's initialization.
844 */
845static void
846ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
760 struct ieee80211_channel *channel) 847 struct ieee80211_channel *channel)
761{ 848{
762 if (ah->ah_version == AR5K_AR5212 && 849 if (ah->ah_version == AR5K_AR5212 &&
@@ -875,7 +962,16 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
875 } 962 }
876} 963}
877 964
878static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, 965/**
966 * ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM
967 * @ah: The &struct ath5k_hw
968 * @channel: The &struct ieee80211_channel
969 *
970 * Use settings stored on EEPROM to properly initialize the card
971 * based on various infos and per-mode calibration data.
972 */
973static void
974ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
879 struct ieee80211_channel *channel) 975 struct ieee80211_channel *channel)
880{ 976{
881 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 977 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -1029,7 +1125,23 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
1029* Main reset function * 1125* Main reset function *
1030\*********************/ 1126\*********************/
1031 1127
1032int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, 1128/**
1129 * ath5k_hw_reset() - The main reset function
1130 * @ah: The &struct ath5k_hw
1131 * @op_mode: One of enum nl80211_iftype
1132 * @channel: The &struct ieee80211_channel
1133 * @fast: Enable fast channel switching
1134 * @skip_pcu: Skip pcu initialization
1135 *
1136 * This is the function we call each time we want to (re)initialize the
1137 * card and pass new settings to hw. We also call it when hw runs into
1138 * trouble to make it come back to a working state.
1139 *
1140 * Returns 0 on success, -EINVAL on false op_mode or channel infos, or -EIO
1141 * on failure.
1142 */
1143int
1144ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1033 struct ieee80211_channel *channel, bool fast, bool skip_pcu) 1145 struct ieee80211_channel *channel, bool fast, bool skip_pcu)
1034{ 1146{
1035 u32 s_seq[10], s_led[3], tsf_up, tsf_lo; 1147 u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
@@ -1242,7 +1354,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1242 /* 1354 /*
1243 * Initialize PCU 1355 * Initialize PCU
1244 */ 1356 */
1245 ath5k_hw_pcu_init(ah, op_mode, mode); 1357 ath5k_hw_pcu_init(ah, op_mode);
1246 1358
1247 /* 1359 /*
1248 * Initialize PHY 1360 * Initialize PHY
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index 5d11c23b4297..aed34d9954c0 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -18,7 +18,9 @@
18 */ 18 */
19 19
20 20
21/* 21/**
22 * DOC: RF Buffer registers
23 *
22 * There are some special registers on the RF chip 24 * There are some special registers on the RF chip
23 * that control various operation settings related mostly to 25 * that control various operation settings related mostly to
24 * the analog parts (channel, gain adjustment etc). 26 * the analog parts (channel, gain adjustment etc).
@@ -44,40 +46,63 @@
44 */ 46 */
45 47
46 48
47/* 49/**
50 * struct ath5k_ini_rfbuffer - Initial RF Buffer settings
51 * @rfb_bank: RF Bank number
52 * @rfb_ctrl_register: RF Buffer control register
53 * @rfb_mode_data: RF Buffer data for each mode
54 *
48 * Struct to hold default mode specific RF 55 * Struct to hold default mode specific RF
49 * register values (RF Banks) 56 * register values (RF Banks) for each chip.
50 */ 57 */
51struct ath5k_ini_rfbuffer { 58struct ath5k_ini_rfbuffer {
52 u8 rfb_bank; /* RF Bank number */ 59 u8 rfb_bank;
53 u16 rfb_ctrl_register; /* RF Buffer control register */ 60 u16 rfb_ctrl_register;
54 u32 rfb_mode_data[3]; /* RF Buffer data for each mode */ 61 u32 rfb_mode_data[3];
55}; 62};
56 63
57/* 64/**
65 * struct ath5k_rfb_field - An RF Buffer field (register/value)
66 * @len: Field length
67 * @pos: Offset on the raw packet
68 * @col: Used for shifting
69 *
58 * Struct to hold RF Buffer field 70 * Struct to hold RF Buffer field
59 * infos used to access certain RF 71 * infos used to access certain RF
60 * analog registers 72 * analog registers
61 */ 73 */
62struct ath5k_rfb_field { 74struct ath5k_rfb_field {
63 u8 len; /* Field length */ 75 u8 len;
64 u16 pos; /* Offset on the raw packet */ 76 u16 pos;
65 u8 col; /* Column -used for shifting */ 77 u8 col;
66}; 78};
67 79
68/* 80/**
69 * RF analog register definition 81 * struct ath5k_rf_reg - RF analog register definition
82 * @bank: RF Buffer Bank number
83 * @index: Register's index on ath5k_rf_regx_idx
84 * @field: The &struct ath5k_rfb_field
85 *
86 * We use this struct to define the set of RF registers
87 * on each chip that we want to tweak. Some RF registers
88 * are common between different chip versions so this saves
89 * us space and complexity because we can refer to an rf
90 * register by it's index no matter what chip we work with
91 * as long as it has that register.
70 */ 92 */
71struct ath5k_rf_reg { 93struct ath5k_rf_reg {
72 u8 bank; /* RF Buffer Bank number */ 94 u8 bank;
73 u8 index; /* Register's index on rf_regs_idx */ 95 u8 index;
74 struct ath5k_rfb_field field; /* RF Buffer field for this register */ 96 struct ath5k_rfb_field field;
75}; 97};
76 98
77/* Map RF registers to indexes 99/**
100 * enum ath5k_rf_regs_idx - Map RF registers to indexes
101 *
78 * We do this to handle common bits and make our 102 * We do this to handle common bits and make our
79 * life easier by using an index for each register 103 * life easier by using an index for each register
80 * instead of a full rfb_field */ 104 * instead of a full rfb_field
105 */
81enum ath5k_rf_regs_idx { 106enum ath5k_rf_regs_idx {
82 /* BANK 2 */ 107 /* BANK 2 */
83 AR5K_RF_TURBO = 0, 108 AR5K_RF_TURBO = 0,
diff --git a/drivers/net/wireless/ath/ath5k/rfgain.h b/drivers/net/wireless/ath/ath5k/rfgain.h
index ebfae052d89e..4d21df0e5975 100644
--- a/drivers/net/wireless/ath/ath5k/rfgain.h
+++ b/drivers/net/wireless/ath/ath5k/rfgain.h
@@ -18,13 +18,17 @@
18 * 18 *
19 */ 19 */
20 20
21/* 21/**
22 * struct ath5k_ini_rfgain - RF Gain table
23 * @rfg_register: RF Gain register address
24 * @rfg_value: Register value for 5 and 2GHz
25 *
22 * Mode-specific RF Gain table (64bytes) for RF5111/5112 26 * Mode-specific RF Gain table (64bytes) for RF5111/5112
23 * (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial 27 * (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
24 * RF Gain values are included in AR5K_AR5210_INI) 28 * RF Gain values are included in AR5K_AR5210_INI)
25 */ 29 */
26struct ath5k_ini_rfgain { 30struct ath5k_ini_rfgain {
27 u16 rfg_register; /* RF Gain register address */ 31 u16 rfg_register;
28 u32 rfg_value[2]; /* [freq (see below)] */ 32 u32 rfg_value[2]; /* [freq (see below)] */
29}; 33};
30 34
@@ -455,18 +459,31 @@ static const struct ath5k_ini_rfgain rfgain_2425[] = {
455#define AR5K_GAIN_CHECK_ADJUST(_g) \ 459#define AR5K_GAIN_CHECK_ADJUST(_g) \
456 ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high) 460 ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
457 461
462/**
463 * struct ath5k_gain_opt_step - An RF gain optimization step
464 * @gos_param: Set of parameters
465 * @gos_gain: Gain
466 */
458struct ath5k_gain_opt_step { 467struct ath5k_gain_opt_step {
459 s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS]; 468 s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
460 s8 gos_gain; 469 s8 gos_gain;
461}; 470};
462 471
472/**
473 * struct ath5k_gain_opt - RF Gain optimization ladder
474 * @go_default: The default step
475 * @go_steps_count: How many optimization steps
476 * @go_step: Array of &struct ath5k_gain_opt_step
477 */
463struct ath5k_gain_opt { 478struct ath5k_gain_opt {
464 u8 go_default; 479 u8 go_default;
465 u8 go_steps_count; 480 u8 go_steps_count;
466 const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT]; 481 const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT];
467}; 482};
468 483
484
469/* 485/*
486 * RF5111
470 * Parameters on gos_param: 487 * Parameters on gos_param:
471 * 1) Tx clip PHY register 488 * 1) Tx clip PHY register
472 * 2) PWD 90 RF register 489 * 2) PWD 90 RF register
@@ -490,6 +507,7 @@ static const struct ath5k_gain_opt rfgain_opt_5111 = {
490}; 507};
491 508
492/* 509/*
510 * RF5112
493 * Parameters on gos_param: 511 * Parameters on gos_param:
494 * 1) Mixgain ovr RF register 512 * 1) Mixgain ovr RF register
495 * 2) PWD 138 RF register 513 * 2) PWD 138 RF register
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index abe3af3c6188..6c59a217b1a1 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/moduleparam.h>
18
17#include "core.h" 19#include "core.h"
18#include "cfg80211.h" 20#include "cfg80211.h"
19#include "debug.h" 21#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index cf513a80b061..eb808b46f94c 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -19,6 +19,7 @@
19#include <linux/circ_buf.h> 19#include <linux/circ_buf.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/vmalloc.h> 21#include <linux/vmalloc.h>
22#include <linux/export.h>
22 23
23#include "debug.h" 24#include "debug.h"
24#include "target.h" 25#include "target.h"
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 9b779aac83e7..e569c652e35c 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -46,8 +46,8 @@ enum ATH6K_DEBUG_MASK {
46}; 46};
47 47
48extern unsigned int debug_mask; 48extern unsigned int debug_mask;
49extern int ath6kl_printk(const char *level, const char *fmt, ...) 49extern __printf(2, 3)
50 __attribute__ ((format (printf, 2, 3))); 50int ath6kl_printk(const char *level, const char *fmt, ...);
51 51
52#define ath6kl_info(fmt, ...) \ 52#define ath6kl_info(fmt, ...) \
53 ath6kl_printk(KERN_INFO, fmt, ##__VA_ARGS__) 53 ath6kl_printk(KERN_INFO, fmt, ##__VA_ARGS__)
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index c614049d7b2e..368ecbd172a3 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1663,6 +1663,7 @@ int ath6kl_core_init(struct ath6kl *ar)
1663 1663
1664 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM | 1664 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
1665 WIPHY_FLAG_HAVE_AP_SME | 1665 WIPHY_FLAG_HAVE_AP_SME |
1666 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
1666 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; 1667 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
1667 1668
1668 if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities)) 1669 if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 07903e6114d8..15c3f56caf4f 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -14,6 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/module.h>
17#include <linux/mmc/card.h> 18#include <linux/mmc/card.h>
18#include <linux/mmc/mmc.h> 19#include <linux/mmc/mmc.h>
19#include <linux/mmc/host.h> 20#include <linux/mmc/host.h>
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7b4c074e12fa..1b4786ae00ac 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -2,6 +2,9 @@ config ATH9K_HW
2 tristate 2 tristate
3config ATH9K_COMMON 3config ATH9K_COMMON
4 tristate 4 tristate
5config ATH9K_DFS_DEBUGFS
6 def_bool y
7 depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
5 8
6config ATH9K 9config ATH9K
7 tristate "Atheros 802.11n wireless cards support" 10 tristate "Atheros 802.11n wireless cards support"
@@ -51,6 +54,25 @@ config ATH9K_DEBUGFS
51 54
52 Also required for changing debug message flags at run time. 55 Also required for changing debug message flags at run time.
53 56
57config ATH9K_DFS_CERTIFIED
58 bool "Atheros DFS support for certified platforms"
59 depends on ATH9K && EXPERT
60 default n
61 ---help---
62 This option enables DFS support for initiating radiation on
63 ath9k. There is no way to dynamically detect if a card was DFS
64 certified and as such this is left as a build time option. This
65 option should only be enabled by system integrators that can
66 guarantee that all the platforms that their kernel will run on
67 have obtained appropriate regulatory body certification for a
68 respective Atheros card by using ath9k on the target shipping
69 platforms.
70
71 This is currently only a placeholder for future DFS support,
72 as DFS support requires more components that still need to be
73 developed. At this point enabling this option won't do anything
74 except increase code size.
75
54config ATH9K_RATE_CONTROL 76config ATH9K_RATE_CONTROL
55 bool "Atheros ath9k rate control" 77 bool "Atheros ath9k rate control"
56 depends on ATH9K 78 depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 49d3f25f509d..da02242499af 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -10,6 +10,8 @@ ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
10ath9k-$(CONFIG_ATH9K_PCI) += pci.o 10ath9k-$(CONFIG_ATH9K_PCI) += pci.o
11ath9k-$(CONFIG_ATH9K_AHB) += ahb.o 11ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
12ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o 12ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
13ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
14ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
13 15
14obj-$(CONFIG_ATH9K) += ath9k.o 16obj-$(CONFIG_ATH9K) += ath9k.o
15 17
@@ -34,7 +36,8 @@ ath9k_hw-y:= \
34 ar9002_mac.o \ 36 ar9002_mac.o \
35 ar9003_mac.o \ 37 ar9003_mac.o \
36 ar9003_eeprom.o \ 38 ar9003_eeprom.o \
37 ar9003_paprd.o 39 ar9003_paprd.o \
40 ar9003_mci.o
38 41
39obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o 42obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
40 43
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 85a54cd2b083..5e47ca6d16a8 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -19,6 +19,7 @@
19#include <linux/nl80211.h> 19#include <linux/nl80211.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/ath9k_platform.h> 21#include <linux/ath9k_platform.h>
22#include <linux/module.h>
22#include "ath9k.h" 23#include "ath9k.h"
23 24
24static const struct platform_device_id ath9k_platform_id_table[] = { 25static const struct platform_device_id ath9k_platform_id_table[] = {
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2776c3c1f506..a639b94f7643 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/export.h>
18#include "hw.h" 19#include "hw.h"
19#include "hw-ops.h" 20#include "hw-ops.h"
20 21
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 88279e325dca..157337febc2b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -203,7 +203,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
203 i); 203 i);
204 204
205 ath_dbg(common, ATH_DBG_CALIBRATE, 205 ath_dbg(common, ATH_DBG_CALIBRATE,
206 "Orignal: Chn %diq_corr_meas = 0x%08x\n", 206 "Original: Chn %d iq_corr_meas = 0x%08x\n",
207 i, ah->totalIqCorrMeas[i]); 207 i, ah->totalIqCorrMeas[i]);
208 208
209 iqCorrNeg = 0; 209 iqCorrNeg = 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 626d547d2f06..11f192a1ceb7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -14,6 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/moduleparam.h>
17#include "hw.h" 18#include "hw.h"
18#include "ar5008_initvals.h" 19#include "ar5008_initvals.h"
19#include "ar9001_initvals.h" 20#include "ar9001_initvals.h"
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index f7d8e516a2a9..b5920168606d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include <linux/export.h>
18 19
19#define AR_BufLen 0x00000fff 20#define AR_BufLen 0x00000fff
20 21
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 12a730dcb500..23b3a6c57800 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -18,6 +18,7 @@
18#include "hw-ops.h" 18#include "hw-ops.h"
19#include "ar9003_phy.h" 19#include "ar9003_phy.h"
20#include "ar9003_rtt.h" 20#include "ar9003_rtt.h"
21#include "ar9003_mci.h"
21 22
22#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT 23#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
23#define MAX_MAG_DELTA 11 24#define MAX_MAG_DELTA 11
@@ -225,7 +226,7 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
225 i); 226 i);
226 227
227 ath_dbg(common, ATH_DBG_CALIBRATE, 228 ath_dbg(common, ATH_DBG_CALIBRATE,
228 "Orignal: Chn %diq_corr_meas = 0x%08x\n", 229 "Original: Chn %d iq_corr_meas = 0x%08x\n",
229 i, ah->totalIqCorrMeas[i]); 230 i, ah->totalIqCorrMeas[i]);
230 231
231 iqCorrNeg = 0; 232 iqCorrNeg = 0;
@@ -824,7 +825,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
824 chan_info_tab[i] + offset); 825 chan_info_tab[i] + offset);
825 826
826 ath_dbg(common, ATH_DBG_CALIBRATE, 827 ath_dbg(common, ATH_DBG_CALIBRATE,
827 "IQ RES[%d]=0x%x" 828 "IQ_RES[%d]=0x%x "
828 "IQ_RES[%d]=0x%x\n", 829 "IQ_RES[%d]=0x%x\n",
829 idx, iq_res[idx], idx + 1, 830 idx, iq_res[idx], idx + 1,
830 iq_res[idx + 1]); 831 iq_res[idx + 1]);
@@ -934,10 +935,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
934{ 935{
935 struct ath_common *common = ath9k_hw_common(ah); 936 struct ath_common *common = ath9k_hw_common(ah);
936 struct ath9k_hw_cal_data *caldata = ah->caldata; 937 struct ath9k_hw_cal_data *caldata = ah->caldata;
938 struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
937 bool txiqcal_done = false, txclcal_done = false; 939 bool txiqcal_done = false, txclcal_done = false;
938 bool is_reusable = true, status = true; 940 bool is_reusable = true, status = true;
939 bool run_rtt_cal = false, run_agc_cal; 941 bool run_rtt_cal = false, run_agc_cal;
940 bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT); 942 bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
943 bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
941 u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL | 944 u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
942 AR_PHY_AGC_CONTROL_FLTR_CAL | 945 AR_PHY_AGC_CONTROL_FLTR_CAL |
943 AR_PHY_AGC_CONTROL_PKDET_CAL; 946 AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -1005,6 +1008,31 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
1005 } else if (caldata && !caldata->done_txiqcal_once) 1008 } else if (caldata && !caldata->done_txiqcal_once)
1006 run_agc_cal = true; 1009 run_agc_cal = true;
1007 1010
1011 if (mci && IS_CHAN_2GHZ(chan) &&
1012 (mci_hw->bt_state == MCI_BT_AWAKE) &&
1013 run_agc_cal &&
1014 !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
1015
1016 u32 pld[4] = {0, 0, 0, 0};
1017
1018 /* send CAL_REQ only when BT is AWAKE. */
1019 ath_dbg(common, ATH_DBG_MCI, "MCI send WLAN_CAL_REQ 0x%x\n",
1020 mci_hw->wlan_cal_seq);
1021 MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
1022 pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
1023 ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
1024
1025 /* Wait BT_CAL_GRANT for 50ms */
1026 ath_dbg(common, ATH_DBG_MCI, "MCI wait for BT_CAL_GRANT");
1027
1028 if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
1029 ath_dbg(common, ATH_DBG_MCI, "MCI got BT_CAL_GRANT");
1030 else {
1031 is_reusable = false;
1032 ath_dbg(common, ATH_DBG_MCI, "\nMCI BT is not responding");
1033 }
1034 }
1035
1008 txiqcal_done = ar9003_hw_tx_iq_cal_run(ah); 1036 txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
1009 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); 1037 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
1010 udelay(5); 1038 udelay(5);
@@ -1022,6 +1050,21 @@ skip_tx_iqcal:
1022 AR_PHY_AGC_CONTROL_CAL, 1050 AR_PHY_AGC_CONTROL_CAL,
1023 0, AH_WAIT_TIMEOUT); 1051 0, AH_WAIT_TIMEOUT);
1024 } 1052 }
1053
1054 if (mci && IS_CHAN_2GHZ(chan) &&
1055 (mci_hw->bt_state == MCI_BT_AWAKE) &&
1056 run_agc_cal &&
1057 !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
1058
1059 u32 pld[4] = {0, 0, 0, 0};
1060
1061 ath_dbg(common, ATH_DBG_MCI, "MCI Send WLAN_CAL_DONE 0x%x\n",
1062 mci_hw->wlan_cal_done);
1063 MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
1064 pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
1065 ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
1066 }
1067
1025 if (rtt && !run_rtt_cal) { 1068 if (rtt && !run_rtt_cal) {
1026 agc_ctrl |= agc_supp_cals; 1069 agc_ctrl |= agc_supp_cals;
1027 REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl); 1070 REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index a93bd63ad23b..4ba6f52943a8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4779,7 +4779,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
4779{ 4779{
4780 struct ath_common *common = ath9k_hw_common(ah); 4780 struct ath_common *common = ath9k_hw_common(ah);
4781 struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep; 4781 struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
4782 u16 twiceMaxEdgePower = MAX_RATE_POWER; 4782 u16 twiceMaxEdgePower;
4783 int i; 4783 int i;
4784 u16 scaledPower = 0, minCtlPower; 4784 u16 scaledPower = 0, minCtlPower;
4785 static const u16 ctlModesFor11a[] = { 4785 static const u16 ctlModesFor11a[] = {
@@ -4880,6 +4880,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
4880 ctlNum = AR9300_NUM_CTLS_5G; 4880 ctlNum = AR9300_NUM_CTLS_5G;
4881 } 4881 }
4882 4882
4883 twiceMaxEdgePower = MAX_RATE_POWER;
4883 for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) { 4884 for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
4884 ath_dbg(common, ATH_DBG_REGULATORY, 4885 ath_dbg(common, ATH_DBG_REGULATORY,
4885 "LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n", 4886 "LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index b363cc06cfd9..631fe4f2e495 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -13,6 +13,7 @@
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16#include <linux/export.h>
16#include "hw.h" 17#include "hw.h"
17#include "ar9003_mac.h" 18#include "ar9003_mac.h"
18 19
@@ -174,20 +175,24 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
174 u32 isr = 0; 175 u32 isr = 0;
175 u32 mask2 = 0; 176 u32 mask2 = 0;
176 struct ath9k_hw_capabilities *pCap = &ah->caps; 177 struct ath9k_hw_capabilities *pCap = &ah->caps;
177 u32 sync_cause = 0;
178 struct ath_common *common = ath9k_hw_common(ah); 178 struct ath_common *common = ath9k_hw_common(ah);
179 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
180 u32 sync_cause = 0, async_cause;
179 181
180 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { 182 async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
183
184 if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
181 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) 185 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
182 == AR_RTC_STATUS_ON) 186 == AR_RTC_STATUS_ON)
183 isr = REG_READ(ah, AR_ISR); 187 isr = REG_READ(ah, AR_ISR);
184 } 188 }
185 189
190
186 sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT; 191 sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
187 192
188 *masked = 0; 193 *masked = 0;
189 194
190 if (!isr && !sync_cause) 195 if (!isr && !sync_cause && !async_cause)
191 return false; 196 return false;
192 197
193 if (isr) { 198 if (isr) {
@@ -293,6 +298,35 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
293 ar9003_hw_bb_watchdog_read(ah); 298 ar9003_hw_bb_watchdog_read(ah);
294 } 299 }
295 300
301 if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
302 u32 raw_intr, rx_msg_intr;
303
304 rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
305 raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
306
307 if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
308 ath_dbg(common, ATH_DBG_MCI,
309 "MCI gets 0xdeadbeef during MCI int processing"
310 "new raw_intr=0x%08x, new rx_msg_raw=0x%08x, "
311 "raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
312 raw_intr, rx_msg_intr, mci->raw_intr,
313 mci->rx_msg_intr);
314 else {
315 mci->rx_msg_intr |= rx_msg_intr;
316 mci->raw_intr |= raw_intr;
317 *masked |= ATH9K_INT_MCI;
318
319 if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
320 mci->cont_status =
321 REG_READ(ah, AR_MCI_CONT_STATUS);
322
323 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
324 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
325 ath_dbg(common, ATH_DBG_MCI, "AR_INTR_SYNC_MCI\n");
326
327 }
328 }
329
296 if (sync_cause) { 330 if (sync_cause) {
297 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { 331 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
298 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); 332 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
new file mode 100644
index 000000000000..8599822dc83f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -0,0 +1,1464 @@
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/export.h>
18#include "hw.h"
19#include "ar9003_phy.h"
20#include "ar9003_mci.h"
21
22static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
23{
24 if (!AR_SREV_9462_20(ah))
25 return;
26
27 REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
28 AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1);
29 udelay(1);
30 REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
31 AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0);
32}
33
34static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
35 u32 bit_position, int time_out)
36{
37 struct ath_common *common = ath9k_hw_common(ah);
38
39 while (time_out) {
40
41 if (REG_READ(ah, address) & bit_position) {
42
43 REG_WRITE(ah, address, bit_position);
44
45 if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
46
47 if (bit_position &
48 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
49 ar9003_mci_reset_req_wakeup(ah);
50
51 if (bit_position &
52 (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
53 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
54 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
55 AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
56
57 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
58 AR_MCI_INTERRUPT_RX_MSG);
59 }
60 break;
61 }
62
63 udelay(10);
64 time_out -= 10;
65
66 if (time_out < 0)
67 break;
68 }
69
70 if (time_out <= 0) {
71 ath_dbg(common, ATH_DBG_MCI,
72 "MCI Wait for Reg 0x%08x = 0x%08x timeout.\n",
73 address, bit_position);
74 ath_dbg(common, ATH_DBG_MCI,
75 "MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x",
76 REG_READ(ah, AR_MCI_INTERRUPT_RAW),
77 REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
78 time_out = 0;
79 }
80
81 return time_out;
82}
83
84void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
85{
86 u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00};
87
88 ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16,
89 wait_done, false);
90 udelay(5);
91}
92
93void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
94{
95 u32 payload = 0x00000000;
96
97 ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1,
98 wait_done, false);
99}
100
101static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done)
102{
103 ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP,
104 NULL, 0, wait_done, false);
105 udelay(5);
106}
107
108void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
109{
110 ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP,
111 NULL, 0, wait_done, false);
112}
113
114static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done)
115{
116 u32 payload = 0x70000000;
117
118 ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1,
119 wait_done, false);
120}
121
122static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done)
123{
124 ar9003_mci_send_message(ah, MCI_SYS_SLEEPING,
125 MCI_FLAG_DISABLE_TIMESTAMP,
126 NULL, 0, wait_done, false);
127}
128
129static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
130 bool wait_done)
131{
132 struct ath_common *common = ath9k_hw_common(ah);
133 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
134 u32 payload[4] = {0, 0, 0, 0};
135
136 if (!mci->bt_version_known &&
137 (mci->bt_state != MCI_BT_SLEEP)) {
138 ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex version query\n");
139 MCI_GPM_SET_TYPE_OPCODE(payload,
140 MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY);
141 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
142 wait_done, true);
143 }
144}
145
146static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
147 bool wait_done)
148{
149 struct ath_common *common = ath9k_hw_common(ah);
150 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
151 u32 payload[4] = {0, 0, 0, 0};
152
153 ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex version response\n");
154 MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
155 MCI_GPM_COEX_VERSION_RESPONSE);
156 *(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) =
157 mci->wlan_ver_major;
158 *(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) =
159 mci->wlan_ver_minor;
160 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
161}
162
163static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
164 bool wait_done)
165{
166 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
167 u32 *payload = &mci->wlan_channels[0];
168
169 if ((mci->wlan_channels_update == true) &&
170 (mci->bt_state != MCI_BT_SLEEP)) {
171 MCI_GPM_SET_TYPE_OPCODE(payload,
172 MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS);
173 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
174 wait_done, true);
175 MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
176 }
177}
178
179static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
180 bool wait_done, u8 query_type)
181{
182 struct ath_common *common = ath9k_hw_common(ah);
183 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
184 u32 payload[4] = {0, 0, 0, 0};
185 bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
186 MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
187
188 if (mci->bt_state != MCI_BT_SLEEP) {
189
190 ath_dbg(common, ATH_DBG_MCI,
191 "MCI Send Coex BT Status Query 0x%02X\n", query_type);
192
193 MCI_GPM_SET_TYPE_OPCODE(payload,
194 MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY);
195
196 *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
197 /*
198 * If bt_status_query message is not sent successfully,
199 * then need_flush_btinfo should be set again.
200 */
201 if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
202 wait_done, true)) {
203 if (query_btinfo) {
204 mci->need_flush_btinfo = true;
205
206 ath_dbg(common, ATH_DBG_MCI,
207 "MCI send bt_status_query fail, "
208 "set flush flag again\n");
209 }
210 }
211
212 if (query_btinfo)
213 mci->query_bt = false;
214 }
215}
216
217void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
218 bool wait_done)
219{
220 struct ath_common *common = ath9k_hw_common(ah);
221 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
222 u32 payload[4] = {0, 0, 0, 0};
223
224 ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex %s BT GPM.\n",
225 (halt) ? "halt" : "unhalt");
226
227 MCI_GPM_SET_TYPE_OPCODE(payload,
228 MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM);
229
230 if (halt) {
231 mci->query_bt = true;
232 /* Send next unhalt no matter halt sent or not */
233 mci->unhalt_bt_gpm = true;
234 mci->need_flush_btinfo = true;
235 *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
236 MCI_GPM_COEX_BT_GPM_HALT;
237 } else
238 *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
239 MCI_GPM_COEX_BT_GPM_UNHALT;
240
241 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
242}
243
244
245static void ar9003_mci_prep_interface(struct ath_hw *ah)
246{
247 struct ath_common *common = ath9k_hw_common(ah);
248 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
249 u32 saved_mci_int_en;
250 u32 mci_timeout = 150;
251
252 mci->bt_state = MCI_BT_SLEEP;
253 saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
254
255 REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
256 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
257 REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
258 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
259 REG_READ(ah, AR_MCI_INTERRUPT_RAW));
260
261 /* Remote Reset */
262 ath_dbg(common, ATH_DBG_MCI, "MCI Reset sequence start\n");
263 ath_dbg(common, ATH_DBG_MCI, "MCI send REMOTE_RESET\n");
264 ar9003_mci_remote_reset(ah, true);
265
266 /*
267 * This delay is required for the reset delay worst case value 255 in
268 * MCI_COMMAND2 register
269 */
270
271 if (AR_SREV_9462_10(ah))
272 udelay(252);
273
274 ath_dbg(common, ATH_DBG_MCI, "MCI Send REQ_WAKE to remoter(BT)\n");
275 ar9003_mci_send_req_wake(ah, true);
276
277 if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
278 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
279
280 ath_dbg(common, ATH_DBG_MCI,
281 "MCI SYS_WAKING from remote(BT)\n");
282 mci->bt_state = MCI_BT_AWAKE;
283
284 if (AR_SREV_9462_10(ah))
285 udelay(10);
286 /*
287 * we don't need to send more remote_reset at this moment.
288 * If BT receive first remote_reset, then BT HW will
289 * be cleaned up and will be able to receive req_wake
290 * and BT HW will respond sys_waking.
291 * In this case, WLAN will receive BT's HW sys_waking.
292 * Otherwise, if BT SW missed initial remote_reset,
293 * that remote_reset will still clean up BT MCI RX,
294 * and the req_wake will wake BT up,
295 * and BT SW will respond this req_wake with a remote_reset and
296 * sys_waking. In this case, WLAN will receive BT's SW
297 * sys_waking. In either case, BT's RX is cleaned up. So we
298 * don't need to reply BT's remote_reset now, if any.
299 * Similarly, if in any case, WLAN can receive BT's sys_waking,
300 * that means WLAN's RX is also fine.
301 */
302
303 /* Send SYS_WAKING to BT */
304
305 ath_dbg(common, ATH_DBG_MCI,
306 "MCI send SW SYS_WAKING to remote BT\n");
307
308 ar9003_mci_send_sys_waking(ah, true);
309 udelay(10);
310
311 /*
312 * Set BT priority interrupt value to be 0xff to
313 * avoid having too many BT PRIORITY interrupts.
314 */
315
316 REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
317 REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
318 REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
319 REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
320 REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
321
322 /*
323 * A contention reset will be received after send out
324 * sys_waking. Also BT priority interrupt bits will be set.
325 * Clear those bits before the next step.
326 */
327
328 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
329 AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
330 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
331 AR_MCI_INTERRUPT_BT_PRI);
332
333 if (AR_SREV_9462_10(ah) || mci->is_2g) {
334 /* Send LNA_TRANS */
335 ath_dbg(common, ATH_DBG_MCI,
336 "MCI send LNA_TRANS to BT\n");
337 ar9003_mci_send_lna_transfer(ah, true);
338 udelay(5);
339 }
340
341 if (AR_SREV_9462_10(ah) || (mci->is_2g &&
342 !mci->update_2g5g)) {
343 if (ar9003_mci_wait_for_interrupt(ah,
344 AR_MCI_INTERRUPT_RX_MSG_RAW,
345 AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
346 mci_timeout))
347 ath_dbg(common, ATH_DBG_MCI,
348 "MCI WLAN has control over the LNA & "
349 "BT obeys it\n");
350 else
351 ath_dbg(common, ATH_DBG_MCI,
352 "MCI BT didn't respond to"
353 "LNA_TRANS\n");
354 }
355
356 if (AR_SREV_9462_10(ah)) {
357 /* Send another remote_reset to deassert BT clk_req. */
358 ath_dbg(common, ATH_DBG_MCI,
359 "MCI another remote_reset to "
360 "deassert clk_req\n");
361 ar9003_mci_remote_reset(ah, true);
362 udelay(252);
363 }
364 }
365
366 /* Clear the extra redundant SYS_WAKING from BT */
367 if ((mci->bt_state == MCI_BT_AWAKE) &&
368 (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
369 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
370 (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
371 AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
372
373 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
374 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
375 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
376 AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
377 }
378
379 REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
380}
381
382void ar9003_mci_disable_interrupt(struct ath_hw *ah)
383{
384 REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
385 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
386}
387
388void ar9003_mci_enable_interrupt(struct ath_hw *ah)
389{
390
391 REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT);
392 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
393 AR_MCI_INTERRUPT_RX_MSG_DEFAULT);
394}
395
396bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
397{
398 u32 intr;
399
400 intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
401 return ((intr & ints) == ints);
402}
403
404void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
405 u32 *rx_msg_intr)
406{
407 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
408 *raw_intr = mci->raw_intr;
409 *rx_msg_intr = mci->rx_msg_intr;
410
411 /* Clean int bits after the values are read. */
412 mci->raw_intr = 0;
413 mci->rx_msg_intr = 0;
414}
415EXPORT_SYMBOL(ar9003_mci_get_interrupt);
416
417void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
418{
419 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
420
421 if (!mci->update_2g5g &&
422 (mci->is_2g != is_2g))
423 mci->update_2g5g = true;
424
425 mci->is_2g = is_2g;
426}
427
428static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
429{
430 struct ath_common *common = ath9k_hw_common(ah);
431 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
432 u32 *payload;
433 u32 recv_type, offset;
434
435 if (msg_index == MCI_GPM_INVALID)
436 return false;
437
438 offset = msg_index << 4;
439
440 payload = (u32 *)(mci->gpm_buf + offset);
441 recv_type = MCI_GPM_TYPE(payload);
442
443 if (recv_type == MCI_GPM_RSVD_PATTERN) {
444 ath_dbg(common, ATH_DBG_MCI, "MCI Skip RSVD GPM\n");
445 return false;
446 }
447
448 return true;
449}
450
451static void ar9003_mci_observation_set_up(struct ath_hw *ah)
452{
453 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
454 if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
455
456 ath9k_hw_cfg_output(ah, 3,
457 AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
458 ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
459 ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
460 ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
461
462 } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
463
464 ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
465 ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
466 ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
467 ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
468 ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
469
470 } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
471
472 ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
473 ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
474 ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
475 ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
476
477 } else
478 return;
479
480 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
481
482 if (AR_SREV_9462_20_OR_LATER(ah)) {
483 REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
484 AR_GLB_DS_JTAG_DISABLE, 1);
485 REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
486 AR_GLB_WLAN_UART_INTF_EN, 0);
487 REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL,
488 ATH_MCI_CONFIG_MCI_OBS_GPIO);
489 }
490
491 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
492 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
493 REG_WRITE(ah, AR_OBS, 0x4b);
494 REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
495 REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
496 REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
497 REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
498 REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
499 AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
500}
501
502static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
503 u8 opcode, u32 bt_flags)
504{
505 struct ath_common *common = ath9k_hw_common(ah);
506 u32 pld[4] = {0, 0, 0, 0};
507
508 MCI_GPM_SET_TYPE_OPCODE(pld,
509 MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS);
510
511 *(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP) = opcode;
512 *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF;
513 *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF;
514 *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF;
515 *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF;
516
517 ath_dbg(common, ATH_DBG_MCI,
518 "MCI BT_MCI_FLAGS: Send Coex BT Update Flags %s 0x%08x\n",
519 (opcode == MCI_GPM_COEX_BT_FLAGS_READ) ? "READ" :
520 ((opcode == MCI_GPM_COEX_BT_FLAGS_SET) ? "SET" : "CLEAR"),
521 bt_flags);
522
523 return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16,
524 wait_done, true);
525}
526
527void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
528 bool is_full_sleep)
529{
530 struct ath_common *common = ath9k_hw_common(ah);
531 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
532 u32 regval, thresh;
533
534 ath_dbg(common, ATH_DBG_MCI, "MCI full_sleep = %d, is_2g = %d\n",
535 is_full_sleep, is_2g);
536
537 /*
538 * GPM buffer and scheduling message buffer are not allocated
539 */
540
541 if (!mci->gpm_addr && !mci->sched_addr) {
542 ath_dbg(common, ATH_DBG_MCI,
543 "MCI GPM and schedule buffers are not allocated");
544 return;
545 }
546
547 if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
548 ath_dbg(common, ATH_DBG_MCI,
549 "MCI it's deadbeef, quit mci_reset\n");
550 return;
551 }
552
553 /* Program MCI DMA related registers */
554 REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr);
555 REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len);
556 REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr);
557
558 /*
559 * To avoid MCI state machine be affected by incoming remote MCI msgs,
560 * MCI mode will be enabled later, right before reset the MCI TX and RX.
561 */
562
563 regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
564 SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
565 SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
566 SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
567 SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
568 SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
569 SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
570 SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
571 SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
572
573 if (is_2g && (AR_SREV_9462_20(ah)) &&
574 !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
575
576 regval |= SM(1, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
577 ath_dbg(common, ATH_DBG_MCI,
578 "MCI sched one step look ahead\n");
579
580 if (!(mci->config &
581 ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
582
583 thresh = MS(mci->config,
584 ATH_MCI_CONFIG_AGGR_THRESH);
585 thresh &= 7;
586 regval |= SM(1,
587 AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN);
588 regval |= SM(thresh, AR_BTCOEX_CTRL_AGGR_THRESH);
589
590 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
591 AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
592 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
593 AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
594
595 } else
596 ath_dbg(common, ATH_DBG_MCI,
597 "MCI sched aggr thresh: off\n");
598 } else
599 ath_dbg(common, ATH_DBG_MCI,
600 "MCI SCHED one step look ahead off\n");
601
602 if (AR_SREV_9462_10(ah))
603 regval |= SM(1, AR_BTCOEX_CTRL_SPDT_ENABLE_10);
604
605 REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
606
607 if (AR_SREV_9462_20(ah)) {
608 REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
609 AR_BTCOEX_CTRL_SPDT_ENABLE);
610 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
611 AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
612 }
613
614 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
615 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
616
617 thresh = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
618 REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, thresh);
619 REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
620
621 /* Resetting the Rx and Tx paths of MCI */
622 regval = REG_READ(ah, AR_MCI_COMMAND2);
623 regval |= SM(1, AR_MCI_COMMAND2_RESET_TX);
624 REG_WRITE(ah, AR_MCI_COMMAND2, regval);
625
626 udelay(1);
627
628 regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX);
629 REG_WRITE(ah, AR_MCI_COMMAND2, regval);
630
631 if (is_full_sleep) {
632 ar9003_mci_mute_bt(ah);
633 udelay(100);
634 }
635
636 regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
637 REG_WRITE(ah, AR_MCI_COMMAND2, regval);
638 udelay(1);
639 regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
640 REG_WRITE(ah, AR_MCI_COMMAND2, regval);
641
642 ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
643 REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
644 (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
645 SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
646
647 REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
648 AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
649
650 if (AR_SREV_9462_20_OR_LATER(ah))
651 ar9003_mci_observation_set_up(ah);
652
653 mci->ready = true;
654 ar9003_mci_prep_interface(ah);
655
656 if (en_int)
657 ar9003_mci_enable_interrupt(ah);
658}
659
/*
 * Silence the BT side of the MCI link before the MCI RX path is reset.
 *
 * Masks all outgoing MCI message types, forces the WLAN weight tables to
 * all-ones, and disables HW LNA updates, then hands the shared LNA to WLAN
 * (LNA_TAKE) and tells BT the system is sleeping (SYS_SLEEPING) so that BT
 * stays quiet and MCI RX cannot become misaligned during the reset.
 *
 * @ah: atheros hardware state
 */
void ar9003_mci_mute_bt(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	/* disable all MCI messages */
	REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
	REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
	REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);

	/* wait for any pending HW messages to flush out */
	udelay(10);

	/*
	 * Send LNA_TAKE and SYS_SLEEPING when
	 * 1. reset not after resuming from full sleep
	 * 2. before reset MCI RX, to quiet BT and avoid MCI RX misalignment
	 */

	ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA take\n");
	ar9003_mci_send_lna_take(ah, true);

	udelay(5);

	ath_dbg(common, ATH_DBG_MCI, "MCI Send sys sleeping\n");
	ar9003_mci_send_sys_sleeping(ah, true);
}
689
690void ar9003_mci_sync_bt_state(struct ath_hw *ah)
691{
692 struct ath_common *common = ath9k_hw_common(ah);
693 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
694 u32 cur_bt_state;
695
696 cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
697
698 if (mci->bt_state != cur_bt_state) {
699 ath_dbg(common, ATH_DBG_MCI,
700 "MCI BT state mismatches. old: %d, new: %d\n",
701 mci->bt_state, cur_bt_state);
702 mci->bt_state = cur_bt_state;
703 }
704
705 if (mci->bt_state != MCI_BT_SLEEP) {
706
707 ar9003_mci_send_coex_version_query(ah, true);
708 ar9003_mci_send_coex_wlan_channels(ah, true);
709
710 if (mci->unhalt_bt_gpm == true) {
711 ath_dbg(common, ATH_DBG_MCI, "MCI unhalt BT GPM");
712 ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
713 }
714 }
715}
716
717static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
718{
719 struct ath_common *common = ath9k_hw_common(ah);
720 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
721 u32 new_flags, to_set, to_clear;
722
723 if (AR_SREV_9462_20(ah) &&
724 mci->update_2g5g &&
725 (mci->bt_state != MCI_BT_SLEEP)) {
726
727 if (mci->is_2g) {
728 new_flags = MCI_2G_FLAGS;
729 to_clear = MCI_2G_FLAGS_CLEAR_MASK;
730 to_set = MCI_2G_FLAGS_SET_MASK;
731 } else {
732 new_flags = MCI_5G_FLAGS;
733 to_clear = MCI_5G_FLAGS_CLEAR_MASK;
734 to_set = MCI_5G_FLAGS_SET_MASK;
735 }
736
737 ath_dbg(common, ATH_DBG_MCI,
738 "MCI BT_MCI_FLAGS: %s 0x%08x clr=0x%08x, set=0x%08x\n",
739 mci->is_2g ? "2G" : "5G", new_flags, to_clear, to_set);
740
741 if (to_clear)
742 ar9003_mci_send_coex_bt_flags(ah, wait_done,
743 MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear);
744
745 if (to_set)
746 ar9003_mci_send_coex_bt_flags(ah, wait_done,
747 MCI_GPM_COEX_BT_FLAGS_SET, to_set);
748 }
749
750 if (AR_SREV_9462_10(ah) && (mci->bt_state != MCI_BT_SLEEP))
751 mci->update_2g5g = false;
752}
753
754static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
755 u32 *payload, bool queue)
756{
757 struct ath_common *common = ath9k_hw_common(ah);
758 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
759 u8 type, opcode;
760
761 if (queue) {
762
763 if (payload)
764 ath_dbg(common, ATH_DBG_MCI,
765 "MCI ERROR: Send fail: %02x: %02x %02x %02x\n",
766 header,
767 *(((u8 *)payload) + 4),
768 *(((u8 *)payload) + 5),
769 *(((u8 *)payload) + 6));
770 else
771 ath_dbg(common, ATH_DBG_MCI,
772 "MCI ERROR: Send fail: %02x\n", header);
773 }
774
775 /* check if the message is to be queued */
776 if (header != MCI_GPM)
777 return;
778
779 type = MCI_GPM_TYPE(payload);
780 opcode = MCI_GPM_OPCODE(payload);
781
782 if (type != MCI_GPM_COEX_AGENT)
783 return;
784
785 switch (opcode) {
786 case MCI_GPM_COEX_BT_UPDATE_FLAGS:
787
788 if (AR_SREV_9462_10(ah))
789 break;
790
791 if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) ==
792 MCI_GPM_COEX_BT_FLAGS_READ)
793 break;
794
795 mci->update_2g5g = queue;
796
797 if (queue)
798 ath_dbg(common, ATH_DBG_MCI,
799 "MCI BT_MCI_FLAGS: 2G5G status <queued> %s.\n",
800 mci->is_2g ? "2G" : "5G");
801 else
802 ath_dbg(common, ATH_DBG_MCI,
803 "MCI BT_MCI_FLAGS: 2G5G status <sent> %s.\n",
804 mci->is_2g ? "2G" : "5G");
805
806 break;
807
808 case MCI_GPM_COEX_WLAN_CHANNELS:
809
810 mci->wlan_channels_update = queue;
811 if (queue)
812 ath_dbg(common, ATH_DBG_MCI,
813 "MCI WLAN channel map <queued>\n");
814 else
815 ath_dbg(common, ATH_DBG_MCI,
816 "MCI WLAN channel map <sent>\n");
817 break;
818
819 case MCI_GPM_COEX_HALT_BT_GPM:
820
821 if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
822 MCI_GPM_COEX_BT_GPM_UNHALT) {
823
824 mci->unhalt_bt_gpm = queue;
825
826 if (queue)
827 ath_dbg(common, ATH_DBG_MCI,
828 "MCI UNHALT BT GPM <queued>\n");
829 else {
830 mci->halted_bt_gpm = false;
831 ath_dbg(common, ATH_DBG_MCI,
832 "MCI UNHALT BT GPM <sent>\n");
833 }
834 }
835
836 if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
837 MCI_GPM_COEX_BT_GPM_HALT) {
838
839 mci->halted_bt_gpm = !queue;
840
841 if (queue)
842 ath_dbg(common, ATH_DBG_MCI,
843 "MCI HALT BT GPM <not sent>\n");
844 else
845 ath_dbg(common, ATH_DBG_MCI,
846 "MCI UNHALT BT GPM <sent>\n");
847 }
848
849 break;
850 default:
851 break;
852 }
853}
854
/*
 * Reconfigure LNA ownership and one-step-look-ahead (OSLA) for a band
 * change, and notify BT of the new 2G/5G flag set.
 *
 * 2G: status is sent first, then the shared LNA is transferred to BT and
 * HW LNA updates are re-enabled (BT shares the antenna on 2G).
 * 5G: the LNA is taken back for WLAN, HW LNA updates are disabled, and the
 * status is sent last.  The statement order mirrors the HW handshake and
 * must not be rearranged.
 *
 * NOTE(review): the wait_done parameter is currently unused — both
 * ar9003_mci_send_2g5g_status() calls pass a hard-coded true; confirm
 * whether it should be forwarded instead.
 *
 * @ah:        atheros hardware state
 * @wait_done: (unused, see NOTE above)
 */
void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;

	if (mci->update_2g5g) {
		if (mci->is_2g) {

			ar9003_mci_send_2g5g_status(ah, true);
			ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA trans\n");
			ar9003_mci_send_lna_transfer(ah, true);
			udelay(5);

			/* let HW update the LNA again on 2G */
			REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
				    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);

			if (AR_SREV_9462_20(ah)) {
				REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
					    AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
				/* re-enable OSLA unless disabled by config */
				if (!(mci->config &
				      ATH_MCI_CONFIG_DISABLE_OSLA)) {
					REG_SET_BIT(ah, AR_BTCOEX_CTRL,
						    AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
				}
			}
		} else {
			ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA take\n");
			ar9003_mci_send_lna_take(ah, true);
			udelay(5);

			/* WLAN owns the LNA on 5G; stop HW updates */
			REG_SET_BIT(ah, AR_MCI_TX_CTRL,
				    AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);

			if (AR_SREV_9462_20(ah)) {
				REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
					    AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
				REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
					    AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
			}

			ar9003_mci_send_2g5g_status(ah, true);
		}
	}
}
899
900bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
901 u32 *payload, u8 len, bool wait_done,
902 bool check_bt)
903{
904 struct ath_common *common = ath9k_hw_common(ah);
905 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
906 bool msg_sent = false;
907 u32 regval;
908 u32 saved_mci_int_en;
909 int i;
910
911 saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
912 regval = REG_READ(ah, AR_BTCOEX_CTRL);
913
914 if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
915
916 ath_dbg(common, ATH_DBG_MCI,
917 "MCI Not sending 0x%x. MCI is not enabled. "
918 "full_sleep = %d\n", header,
919 (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
920
921 ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
922 return false;
923
924 } else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
925
926 ath_dbg(common, ATH_DBG_MCI,
927 "MCI Don't send message 0x%x. BT is in sleep state\n", header);
928
929 ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
930 return false;
931 }
932
933 if (wait_done)
934 REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
935
936 /* Need to clear SW_MSG_DONE raw bit before wait */
937
938 REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
939 (AR_MCI_INTERRUPT_SW_MSG_DONE |
940 AR_MCI_INTERRUPT_MSG_FAIL_MASK));
941
942 if (payload) {
943 for (i = 0; (i * 4) < len; i++)
944 REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4),
945 *(payload + i));
946 }
947
948 REG_WRITE(ah, AR_MCI_COMMAND0,
949 (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP),
950 AR_MCI_COMMAND0_DISABLE_TIMESTAMP) |
951 SM(len, AR_MCI_COMMAND0_LEN) |
952 SM(header, AR_MCI_COMMAND0_HEADER)));
953
954 if (wait_done &&
955 !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
956 AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
957 ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
958 else {
959 ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
960 msg_sent = true;
961 }
962
963 if (wait_done)
964 REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
965
966 return msg_sent;
967}
968EXPORT_SYMBOL(ar9003_mci_send_message);
969
970void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
971 u16 len, u32 sched_addr)
972{
973 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
974 void *sched_buf = (void *)((char *) gpm_buf + (sched_addr - gpm_addr));
975
976 mci->gpm_addr = gpm_addr;
977 mci->gpm_buf = gpm_buf;
978 mci->gpm_len = len;
979 mci->sched_addr = sched_addr;
980 mci->sched_buf = sched_buf;
981
982 ar9003_mci_reset(ah, true, true, true);
983}
984EXPORT_SYMBOL(ar9003_mci_setup);
985
986void ar9003_mci_cleanup(struct ath_hw *ah)
987{
988 struct ath_common *common = ath9k_hw_common(ah);
989
990 /* Turn off MCI and Jupiter mode. */
991 REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
992 ath_dbg(common, ATH_DBG_MCI, "MCI ar9003_mci_cleanup\n");
993 ar9003_mci_disable_interrupt(ah);
994}
995EXPORT_SYMBOL(ar9003_mci_cleanup);
996
997static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
998 u8 gpm_opcode, u32 *p_gpm)
999{
1000 struct ath_common *common = ath9k_hw_common(ah);
1001 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1002 u8 *p_data = (u8 *) p_gpm;
1003
1004 if (gpm_type != MCI_GPM_COEX_AGENT)
1005 return;
1006
1007 switch (gpm_opcode) {
1008 case MCI_GPM_COEX_VERSION_QUERY:
1009 ath_dbg(common, ATH_DBG_MCI,
1010 "MCI Recv GPM COEX Version Query\n");
1011 ar9003_mci_send_coex_version_response(ah, true);
1012 break;
1013 case MCI_GPM_COEX_VERSION_RESPONSE:
1014 ath_dbg(common, ATH_DBG_MCI,
1015 "MCI Recv GPM COEX Version Response\n");
1016 mci->bt_ver_major =
1017 *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION);
1018 mci->bt_ver_minor =
1019 *(p_data + MCI_GPM_COEX_B_MINOR_VERSION);
1020 mci->bt_version_known = true;
1021 ath_dbg(common, ATH_DBG_MCI,
1022 "MCI BT Coex version: %d.%d\n",
1023 mci->bt_ver_major,
1024 mci->bt_ver_minor);
1025 break;
1026 case MCI_GPM_COEX_STATUS_QUERY:
1027 ath_dbg(common, ATH_DBG_MCI,
1028 "MCI Recv GPM COEX Status Query = 0x%02X.\n",
1029 *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP));
1030 mci->wlan_channels_update = true;
1031 ar9003_mci_send_coex_wlan_channels(ah, true);
1032 break;
1033 case MCI_GPM_COEX_BT_PROFILE_INFO:
1034 mci->query_bt = true;
1035 ath_dbg(common, ATH_DBG_MCI,
1036 "MCI Recv GPM COEX BT_Profile_Info\n");
1037 break;
1038 case MCI_GPM_COEX_BT_STATUS_UPDATE:
1039 mci->query_bt = true;
1040 ath_dbg(common, ATH_DBG_MCI,
1041 "MCI Recv GPM COEX BT_Status_Update "
1042 "SEQ=%d (drop&query)\n", *(p_gpm + 3));
1043 break;
1044 default:
1045 break;
1046 }
1047}
1048
1049u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
1050 u8 gpm_opcode, int time_out)
1051{
1052 struct ath_common *common = ath9k_hw_common(ah);
1053 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1054 u32 *p_gpm = NULL, mismatch = 0, more_data;
1055 u32 offset;
1056 u8 recv_type = 0, recv_opcode = 0;
1057 bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
1058
1059 more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;
1060
1061 while (time_out > 0) {
1062 if (p_gpm) {
1063 MCI_GPM_RECYCLE(p_gpm);
1064 p_gpm = NULL;
1065 }
1066
1067 if (more_data != MCI_GPM_MORE)
1068 time_out = ar9003_mci_wait_for_interrupt(ah,
1069 AR_MCI_INTERRUPT_RX_MSG_RAW,
1070 AR_MCI_INTERRUPT_RX_MSG_GPM,
1071 time_out);
1072
1073 if (!time_out)
1074 break;
1075
1076 offset = ar9003_mci_state(ah,
1077 MCI_STATE_NEXT_GPM_OFFSET, &more_data);
1078
1079 if (offset == MCI_GPM_INVALID)
1080 continue;
1081
1082 p_gpm = (u32 *) (mci->gpm_buf + offset);
1083 recv_type = MCI_GPM_TYPE(p_gpm);
1084 recv_opcode = MCI_GPM_OPCODE(p_gpm);
1085
1086 if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
1087
1088 if (recv_type == gpm_type) {
1089
1090 if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
1091 !b_is_bt_cal_done) {
1092 gpm_type = MCI_GPM_BT_CAL_GRANT;
1093 ath_dbg(common, ATH_DBG_MCI,
1094 "MCI Recv BT_CAL_DONE"
1095 "wait BT_CAL_GRANT\n");
1096 continue;
1097 }
1098
1099 break;
1100 }
1101 } else if ((recv_type == gpm_type) &&
1102 (recv_opcode == gpm_opcode))
1103 break;
1104
1105 /* not expected message */
1106
1107 /*
1108 * check if it's cal_grant
1109 *
1110 * When we're waiting for cal_grant in reset routine,
1111 * it's possible that BT sends out cal_request at the
1112 * same time. Since BT's calibration doesn't happen
1113 * that often, we'll let BT completes calibration then
1114 * we continue to wait for cal_grant from BT.
1115 * Orginal: Wait BT_CAL_GRANT.
1116 * New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait
1117 * BT_CAL_DONE -> Wait BT_CAL_GRANT.
1118 */
1119
1120 if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
1121 (recv_type == MCI_GPM_BT_CAL_REQ)) {
1122
1123 u32 payload[4] = {0, 0, 0, 0};
1124
1125 gpm_type = MCI_GPM_BT_CAL_DONE;
1126 ath_dbg(common, ATH_DBG_MCI,
1127 "MCI Rcv BT_CAL_REQ, send WLAN_CAL_GRANT\n");
1128
1129 MCI_GPM_SET_CAL_TYPE(payload,
1130 MCI_GPM_WLAN_CAL_GRANT);
1131
1132 ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
1133 false, false);
1134
1135 ath_dbg(common, ATH_DBG_MCI,
1136 "MCI now wait for BT_CAL_DONE\n");
1137
1138 continue;
1139 } else {
1140 ath_dbg(common, ATH_DBG_MCI, "MCI GPM subtype"
1141 "not match 0x%x\n", *(p_gpm + 1));
1142 mismatch++;
1143 ar9003_mci_process_gpm_extra(ah, recv_type,
1144 recv_opcode, p_gpm);
1145 }
1146 }
1147 if (p_gpm) {
1148 MCI_GPM_RECYCLE(p_gpm);
1149 p_gpm = NULL;
1150 }
1151
1152 if (time_out <= 0) {
1153 time_out = 0;
1154 ath_dbg(common, ATH_DBG_MCI,
1155 "MCI GPM received timeout, mismatch = %d\n", mismatch);
1156 } else
1157 ath_dbg(common, ATH_DBG_MCI,
1158 "MCI Receive GPM type=0x%x, code=0x%x\n",
1159 gpm_type, gpm_opcode);
1160
1161 while (more_data == MCI_GPM_MORE) {
1162
1163 ath_dbg(common, ATH_DBG_MCI, "MCI discard remaining GPM\n");
1164 offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
1165 &more_data);
1166
1167 if (offset == MCI_GPM_INVALID)
1168 break;
1169
1170 p_gpm = (u32 *) (mci->gpm_buf + offset);
1171 recv_type = MCI_GPM_TYPE(p_gpm);
1172 recv_opcode = MCI_GPM_OPCODE(p_gpm);
1173
1174 if (!MCI_GPM_IS_CAL_TYPE(recv_type))
1175 ar9003_mci_process_gpm_extra(ah, recv_type,
1176 recv_opcode, p_gpm);
1177
1178 MCI_GPM_RECYCLE(p_gpm);
1179 }
1180
1181 return time_out;
1182}
1183
1184u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
1185{
1186 struct ath_common *common = ath9k_hw_common(ah);
1187 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1188 u32 value = 0, more_gpm = 0, gpm_ptr;
1189 u8 query_type;
1190
1191 switch (state_type) {
1192 case MCI_STATE_ENABLE:
1193 if (mci->ready) {
1194
1195 value = REG_READ(ah, AR_BTCOEX_CTRL);
1196
1197 if ((value == 0xdeadbeef) || (value == 0xffffffff))
1198 value = 0;
1199 }
1200 value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
1201 break;
1202 case MCI_STATE_INIT_GPM_OFFSET:
1203 value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1204 ath_dbg(common, ATH_DBG_MCI,
1205 "MCI GPM initial WRITE_PTR=%d\n", value);
1206 mci->gpm_idx = value;
1207 break;
1208 case MCI_STATE_NEXT_GPM_OFFSET:
1209 case MCI_STATE_LAST_GPM_OFFSET:
1210 /*
1211 * This could be useful to avoid new GPM message interrupt which
1212 * may lead to spurious interrupt after power sleep, or multiple
1213 * entry of ath_mci_intr().
1214 * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
1215 * alleviate this effect, but clearing GPM RX interrupt bit is
1216 * safe, because whether this is called from hw or driver code
1217 * there must be an interrupt bit set/triggered initially
1218 */
1219 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
1220 AR_MCI_INTERRUPT_RX_MSG_GPM);
1221
1222 gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
1223 value = gpm_ptr;
1224
1225 if (value == 0)
1226 value = mci->gpm_len - 1;
1227 else if (value >= mci->gpm_len) {
1228 if (value != 0xFFFF) {
1229 value = 0;
1230 ath_dbg(common, ATH_DBG_MCI, "MCI GPM offset"
1231 "out of range\n");
1232 }
1233 } else
1234 value--;
1235
1236 if (value == 0xFFFF) {
1237 value = MCI_GPM_INVALID;
1238 more_gpm = MCI_GPM_NOMORE;
1239 ath_dbg(common, ATH_DBG_MCI, "MCI GPM ptr invalid"
1240 "@ptr=%d, offset=%d, more=GPM_NOMORE\n",
1241 gpm_ptr, value);
1242 } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
1243
1244 if (gpm_ptr == mci->gpm_idx) {
1245 value = MCI_GPM_INVALID;
1246 more_gpm = MCI_GPM_NOMORE;
1247
1248 ath_dbg(common, ATH_DBG_MCI, "MCI GPM message"
1249 "not available @ptr=%d, @offset=%d,"
1250 "more=GPM_NOMORE\n", gpm_ptr, value);
1251 } else {
1252 for (;;) {
1253
1254 u32 temp_index;
1255
1256 /* skip reserved GPM if any */
1257
1258 if (value != mci->gpm_idx)
1259 more_gpm = MCI_GPM_MORE;
1260 else
1261 more_gpm = MCI_GPM_NOMORE;
1262
1263 temp_index = mci->gpm_idx;
1264 mci->gpm_idx++;
1265
1266 if (mci->gpm_idx >=
1267 mci->gpm_len)
1268 mci->gpm_idx = 0;
1269
1270 ath_dbg(common, ATH_DBG_MCI,
1271 "MCI GPM message got ptr=%d,"
1272 "@offset=%d, more=%d\n",
1273 gpm_ptr, temp_index,
1274 (more_gpm == MCI_GPM_MORE));
1275
1276 if (ar9003_mci_is_gpm_valid(ah,
1277 temp_index)) {
1278 value = temp_index;
1279 break;
1280 }
1281
1282 if (more_gpm == MCI_GPM_NOMORE) {
1283 value = MCI_GPM_INVALID;
1284 break;
1285 }
1286 }
1287 }
1288 if (p_data)
1289 *p_data = more_gpm;
1290 }
1291
1292 if (value != MCI_GPM_INVALID)
1293 value <<= 4;
1294
1295 break;
1296 case MCI_STATE_LAST_SCHD_MSG_OFFSET:
1297 value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
1298 AR_MCI_RX_LAST_SCHD_MSG_INDEX);
1299 /* Make it in bytes */
1300 value <<= 4;
1301 break;
1302
1303 case MCI_STATE_REMOTE_SLEEP:
1304 value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
1305 AR_MCI_RX_REMOTE_SLEEP) ?
1306 MCI_BT_SLEEP : MCI_BT_AWAKE;
1307 break;
1308
1309 case MCI_STATE_CONT_RSSI_POWER:
1310 value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
1311 break;
1312
1313 case MCI_STATE_CONT_PRIORITY:
1314 value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
1315 break;
1316
1317 case MCI_STATE_CONT_TXRX:
1318 value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
1319 break;
1320
1321 case MCI_STATE_BT:
1322 value = mci->bt_state;
1323 break;
1324
1325 case MCI_STATE_SET_BT_SLEEP:
1326 mci->bt_state = MCI_BT_SLEEP;
1327 break;
1328
1329 case MCI_STATE_SET_BT_AWAKE:
1330 mci->bt_state = MCI_BT_AWAKE;
1331 ar9003_mci_send_coex_version_query(ah, true);
1332 ar9003_mci_send_coex_wlan_channels(ah, true);
1333
1334 if (mci->unhalt_bt_gpm) {
1335
1336 ath_dbg(common, ATH_DBG_MCI,
1337 "MCI unhalt BT GPM\n");
1338 ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
1339 }
1340
1341 ar9003_mci_2g5g_switch(ah, true);
1342 break;
1343
1344 case MCI_STATE_SET_BT_CAL_START:
1345 mci->bt_state = MCI_BT_CAL_START;
1346 break;
1347
1348 case MCI_STATE_SET_BT_CAL:
1349 mci->bt_state = MCI_BT_CAL;
1350 break;
1351
1352 case MCI_STATE_RESET_REQ_WAKE:
1353 ar9003_mci_reset_req_wakeup(ah);
1354 mci->update_2g5g = true;
1355
1356 if ((AR_SREV_9462_20_OR_LATER(ah)) &&
1357 (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK)) {
1358 /* Check if we still have control of the GPIOs */
1359 if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) &
1360 ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
1361 ATH_MCI_CONFIG_MCI_OBS_GPIO) {
1362
1363 ath_dbg(common, ATH_DBG_MCI,
1364 "MCI reconfigure observation");
1365 ar9003_mci_observation_set_up(ah);
1366 }
1367 }
1368 break;
1369
1370 case MCI_STATE_SEND_WLAN_COEX_VERSION:
1371 ar9003_mci_send_coex_version_response(ah, true);
1372 break;
1373
1374 case MCI_STATE_SET_BT_COEX_VERSION:
1375
1376 if (!p_data)
1377 ath_dbg(common, ATH_DBG_MCI,
1378 "MCI Set BT Coex version with NULL data!!\n");
1379 else {
1380 mci->bt_ver_major = (*p_data >> 8) & 0xff;
1381 mci->bt_ver_minor = (*p_data) & 0xff;
1382 mci->bt_version_known = true;
1383 ath_dbg(common, ATH_DBG_MCI,
1384 "MCI BT version set: %d.%d\n",
1385 mci->bt_ver_major,
1386 mci->bt_ver_minor);
1387 }
1388 break;
1389
1390 case MCI_STATE_SEND_WLAN_CHANNELS:
1391 if (p_data) {
1392 if (((mci->wlan_channels[1] & 0xffff0000) ==
1393 (*(p_data + 1) & 0xffff0000)) &&
1394 (mci->wlan_channels[2] == *(p_data + 2)) &&
1395 (mci->wlan_channels[3] == *(p_data + 3)))
1396 break;
1397
1398 mci->wlan_channels[0] = *p_data++;
1399 mci->wlan_channels[1] = *p_data++;
1400 mci->wlan_channels[2] = *p_data++;
1401 mci->wlan_channels[3] = *p_data++;
1402 }
1403 mci->wlan_channels_update = true;
1404 ar9003_mci_send_coex_wlan_channels(ah, true);
1405 break;
1406
1407 case MCI_STATE_SEND_VERSION_QUERY:
1408 ar9003_mci_send_coex_version_query(ah, true);
1409 break;
1410
1411 case MCI_STATE_SEND_STATUS_QUERY:
1412 query_type = (AR_SREV_9462_10(ah)) ?
1413 MCI_GPM_COEX_QUERY_BT_ALL_INFO :
1414 MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
1415
1416 ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
1417 break;
1418
1419 case MCI_STATE_NEED_FLUSH_BT_INFO:
1420 /*
1421 * btcoex_hw.mci.unhalt_bt_gpm means whether it's
1422 * needed to send UNHALT message. It's set whenever
1423 * there's a request to send HALT message.
1424 * mci_halted_bt_gpm means whether HALT message is sent
1425 * out successfully.
1426 *
1427 * Checking (mci_unhalt_bt_gpm == false) instead of
1428 * checking (ah->mci_halted_bt_gpm == false) will make
1429 * sure currently is in UNHALT-ed mode and BT can
1430 * respond to status query.
1431 */
1432 value = (!mci->unhalt_bt_gpm &&
1433 mci->need_flush_btinfo) ? 1 : 0;
1434 if (p_data)
1435 mci->need_flush_btinfo =
1436 (*p_data != 0) ? true : false;
1437 break;
1438
1439 case MCI_STATE_RECOVER_RX:
1440
1441 ath_dbg(common, ATH_DBG_MCI, "MCI hw RECOVER_RX\n");
1442 ar9003_mci_prep_interface(ah);
1443 mci->query_bt = true;
1444 mci->need_flush_btinfo = true;
1445 ar9003_mci_send_coex_wlan_channels(ah, true);
1446 ar9003_mci_2g5g_switch(ah, true);
1447 break;
1448
1449 case MCI_STATE_NEED_FTP_STOMP:
1450 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
1451 break;
1452
1453 case MCI_STATE_NEED_TUNING:
1454 value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
1455 break;
1456
1457 default:
1458 break;
1459
1460 }
1461
1462 return value;
1463}
1464EXPORT_SYMBOL(ar9003_mci_state);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
new file mode 100644
index 000000000000..798da116a44c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -0,0 +1,102 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef AR9003_MCI_H
18#define AR9003_MCI_H
19
20#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */
21
22/* Default remote BT device MCI COEX version */
23#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3
24#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT 0
25
26/* Local WLAN MCI COEX version */
27#define MCI_GPM_COEX_MAJOR_VERSION_WLAN 3
28#define MCI_GPM_COEX_MINOR_VERSION_WLAN 0
29
30enum mci_gpm_coex_query_type {
31 MCI_GPM_COEX_QUERY_BT_ALL_INFO = BIT(0),
32 MCI_GPM_COEX_QUERY_BT_TOPOLOGY = BIT(1),
33 MCI_GPM_COEX_QUERY_BT_DEBUG = BIT(2),
34};
35
36enum mci_gpm_coex_halt_bt_gpm {
37 MCI_GPM_COEX_BT_GPM_UNHALT,
38 MCI_GPM_COEX_BT_GPM_HALT
39};
40
41enum mci_gpm_coex_bt_update_flags_op {
42 MCI_GPM_COEX_BT_FLAGS_READ,
43 MCI_GPM_COEX_BT_FLAGS_SET,
44 MCI_GPM_COEX_BT_FLAGS_CLEAR
45};
46
47#define MCI_NUM_BT_CHANNELS 79
48
49#define MCI_BT_MCI_FLAGS_UPDATE_CORR 0x00000002
50#define MCI_BT_MCI_FLAGS_UPDATE_HDR 0x00000004
51#define MCI_BT_MCI_FLAGS_UPDATE_PLD 0x00000008
52#define MCI_BT_MCI_FLAGS_LNA_CTRL 0x00000010
53#define MCI_BT_MCI_FLAGS_DEBUG 0x00000020
54#define MCI_BT_MCI_FLAGS_SCHED_MSG 0x00000040
55#define MCI_BT_MCI_FLAGS_CONT_MSG 0x00000080
56#define MCI_BT_MCI_FLAGS_COEX_GPM 0x00000100
57#define MCI_BT_MCI_FLAGS_CPU_INT_MSG 0x00000200
58#define MCI_BT_MCI_FLAGS_MCI_MODE 0x00000400
59#define MCI_BT_MCI_FLAGS_AR9462_MODE 0x00001000
60#define MCI_BT_MCI_FLAGS_OTHER 0x00010000
61
62#define MCI_DEFAULT_BT_MCI_FLAGS 0x00011dde
63
64#define MCI_TOGGLE_BT_MCI_FLAGS (MCI_BT_MCI_FLAGS_UPDATE_CORR | \
65 MCI_BT_MCI_FLAGS_UPDATE_HDR | \
66 MCI_BT_MCI_FLAGS_UPDATE_PLD | \
67 MCI_BT_MCI_FLAGS_MCI_MODE)
68
69#define MCI_2G_FLAGS_CLEAR_MASK 0x00000000
70#define MCI_2G_FLAGS_SET_MASK MCI_TOGGLE_BT_MCI_FLAGS
71#define MCI_2G_FLAGS MCI_DEFAULT_BT_MCI_FLAGS
72
73#define MCI_5G_FLAGS_CLEAR_MASK MCI_TOGGLE_BT_MCI_FLAGS
74#define MCI_5G_FLAGS_SET_MASK 0x00000000
75#define MCI_5G_FLAGS (MCI_DEFAULT_BT_MCI_FLAGS & \
76 ~MCI_TOGGLE_BT_MCI_FLAGS)
77
78/*
79 * Default value for AR9462 is 0x00002201
80 */
81#define ATH_MCI_CONFIG_CONCUR_TX 0x00000003
82#define ATH_MCI_CONFIG_MCI_OBS_MCI 0x00000004
83#define ATH_MCI_CONFIG_MCI_OBS_TXRX 0x00000008
84#define ATH_MCI_CONFIG_MCI_OBS_BT 0x00000010
85#define ATH_MCI_CONFIG_DISABLE_MCI_CAL 0x00000020
86#define ATH_MCI_CONFIG_DISABLE_OSLA 0x00000040
87#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP 0x00000080
88#define ATH_MCI_CONFIG_AGGR_THRESH 0x00000700
89#define ATH_MCI_CONFIG_AGGR_THRESH_S 8
90#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH 0x00000800
91#define ATH_MCI_CONFIG_CLK_DIV 0x00003000
92#define ATH_MCI_CONFIG_CLK_DIV_S 12
93#define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000
94#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000
95#define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000
96
97#define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \
98 ATH_MCI_CONFIG_MCI_OBS_TXRX | \
99 ATH_MCI_CONFIG_MCI_OBS_BT)
100#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F
101
102#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 0c462c904cbe..a4450cba0653 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -14,6 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/export.h>
17#include "hw.h" 18#include "hw.h"
18#include "ar9003_phy.h" 19#include "ar9003_phy.h"
19 20
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 04b060af5087..e41d26939ab8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -14,6 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/export.h>
17#include "hw.h" 18#include "hw.h"
18#include "ar9003_phy.h" 19#include "ar9003_phy.h"
19 20
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 497d7461838a..ed64114571fc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -490,6 +490,8 @@
490#define AR_PHY_TEST_CTL_TSTADC_EN_S 8 490#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
491#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00 491#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
492#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10 492#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
493#define AR_PHY_TEST_CTL_DEBUGPORT_SEL 0xe0000000
494#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29
493 495
494 496
495#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168) 497#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
@@ -1001,6 +1003,7 @@
1001 1003
1002/* GLB Registers */ 1004/* GLB Registers */
1003#define AR_GLB_BASE 0x20000 1005#define AR_GLB_BASE 0x20000
1006#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
1004#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44) 1007#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44)
1005#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \ 1008#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \
1006 (AR_SREV_9462_20(_ah) ? 0x4c : 0x50)) 1009 (AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 259a6f312afb..dc2054f0378e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -41,24 +41,24 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
41 41
42static const u32 ar9462_2p0_baseband_postamble[][5] = { 42static const u32 ar9462_2p0_baseband_postamble[][5] = {
43 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 43 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
44 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011}, 44 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
45 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e}, 45 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
46 {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x5ac640de}, 46 {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
47 {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x0796be89}, 47 {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
48 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, 48 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
49 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c}, 49 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
50 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, 50 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
51 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, 51 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
52 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020}, 52 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
53 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, 53 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
54 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x92c84d2e}, 54 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
55 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e}, 55 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
56 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 56 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
57 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 57 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
58 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 58 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
59 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 59 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
60 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782}, 60 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
61 {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, 61 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
62 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 62 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
63 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 63 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
64 {0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0}, 64 {0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0},
@@ -81,6 +81,15 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
81 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, 81 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
82 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 82 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
83 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 83 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
84 {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000},
85 {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
86 {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00},
87 {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
88 {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
89 {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
90 {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
91 {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
92 {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
84 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, 93 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
85 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, 94 {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
86 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 95 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1107,11 +1116,11 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
1107 {0x00009e30, 0x06336f77}, 1116 {0x00009e30, 0x06336f77},
1108 {0x00009e34, 0x6af6532f}, 1117 {0x00009e34, 0x6af6532f},
1109 {0x00009e38, 0x0cc80c00}, 1118 {0x00009e38, 0x0cc80c00},
1110 {0x00009e40, 0x0d261820}, 1119 {0x00009e40, 0x15262820},
1111 {0x00009e4c, 0x00001004}, 1120 {0x00009e4c, 0x00001004},
1112 {0x00009e50, 0x00ff03f1}, 1121 {0x00009e50, 0x00ff03f1},
1113 {0x00009e54, 0xe4c355c7}, 1122 {0x00009e54, 0xe4c555c2},
1114 {0x00009e58, 0xfd897735}, 1123 {0x00009e58, 0xfd857722},
1115 {0x00009e5c, 0xe9198724}, 1124 {0x00009e5c, 0xe9198724},
1116 {0x00009fc0, 0x803e4788}, 1125 {0x00009fc0, 0x803e4788},
1117 {0x00009fc4, 0x0001efb5}, 1126 {0x00009fc4, 0x0001efb5},
@@ -1142,9 +1151,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
1142 {0x0000a398, 0x001f0e0f}, 1151 {0x0000a398, 0x001f0e0f},
1143 {0x0000a39c, 0x0075393f}, 1152 {0x0000a39c, 0x0075393f},
1144 {0x0000a3a0, 0xb79f6427}, 1153 {0x0000a3a0, 0xb79f6427},
1145 {0x0000a3a4, 0x00000000},
1146 {0x0000a3a8, 0xaaaaaaaa},
1147 {0x0000a3ac, 0x3c466478},
1148 {0x0000a3c0, 0x20202020}, 1154 {0x0000a3c0, 0x20202020},
1149 {0x0000a3c4, 0x22222220}, 1155 {0x0000a3c4, 0x22222220},
1150 {0x0000a3c8, 0x20200020}, 1156 {0x0000a3c8, 0x20200020},
@@ -1167,12 +1173,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
1167 {0x0000a40c, 0x00820820}, 1173 {0x0000a40c, 0x00820820},
1168 {0x0000a414, 0x1ce739ce}, 1174 {0x0000a414, 0x1ce739ce},
1169 {0x0000a418, 0x2d001dce}, 1175 {0x0000a418, 0x2d001dce},
1170 {0x0000a41c, 0x1ce739ce},
1171 {0x0000a420, 0x000001ce},
1172 {0x0000a424, 0x1ce739ce},
1173 {0x0000a428, 0x000001ce},
1174 {0x0000a42c, 0x1ce739ce},
1175 {0x0000a430, 0x1ce739ce},
1176 {0x0000a434, 0x00000000}, 1176 {0x0000a434, 0x00000000},
1177 {0x0000a438, 0x00001801}, 1177 {0x0000a438, 0x00001801},
1178 {0x0000a43c, 0x00100000}, 1178 {0x0000a43c, 0x00100000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 93b45b4b3033..130e5dba9555 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -159,6 +159,9 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
159/* return block-ack bitmap index given sequence and starting sequence */ 159/* return block-ack bitmap index given sequence and starting sequence */
160#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1)) 160#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
161 161
162/* return the seqno for _start + _offset */
163#define ATH_BA_INDEX2SEQ(_seq, _offset) (((_seq) + (_offset)) & (IEEE80211_SEQ_MAX - 1))
164
162/* returns delimiter padding required given the packet length */ 165/* returns delimiter padding required given the packet length */
163#define ATH_AGGR_GET_NDELIM(_len) \ 166#define ATH_AGGR_GET_NDELIM(_len) \
164 (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \ 167 (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
@@ -238,6 +241,7 @@ struct ath_atx_tid {
238 struct ath_node *an; 241 struct ath_node *an;
239 struct ath_atx_ac *ac; 242 struct ath_atx_ac *ac;
240 unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)]; 243 unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
244 int bar_index;
241 u16 seq_start; 245 u16 seq_start;
242 u16 seq_next; 246 u16 seq_next;
243 u16 baw_size; 247 u16 baw_size;
@@ -252,9 +256,9 @@ struct ath_atx_tid {
252struct ath_node { 256struct ath_node {
253#ifdef CONFIG_ATH9K_DEBUGFS 257#ifdef CONFIG_ATH9K_DEBUGFS
254 struct list_head list; /* for sc->nodes */ 258 struct list_head list; /* for sc->nodes */
259#endif
255 struct ieee80211_sta *sta; /* station struct we're part of */ 260 struct ieee80211_sta *sta; /* station struct we're part of */
256 struct ieee80211_vif *vif; /* interface with which we're associated */ 261 struct ieee80211_vif *vif; /* interface with which we're associated */
257#endif
258 struct ath_atx_tid tid[WME_NUM_TID]; 262 struct ath_atx_tid tid[WME_NUM_TID];
259 struct ath_atx_ac ac[WME_NUM_AC]; 263 struct ath_atx_ac ac[WME_NUM_AC];
260 int ps_key; 264 int ps_key;
@@ -276,7 +280,6 @@ struct ath_tx_control {
276}; 280};
277 281
278#define ATH_TX_ERROR 0x01 282#define ATH_TX_ERROR 0x01
279#define ATH_TX_BAR 0x02
280 283
281/** 284/**
282 * @txq_map: Index is mac80211 queue number. This is 285 * @txq_map: Index is mac80211 queue number. This is
@@ -462,7 +465,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
462#define ATH_LED_PIN_9287 8 465#define ATH_LED_PIN_9287 8
463#define ATH_LED_PIN_9300 10 466#define ATH_LED_PIN_9300 10
464#define ATH_LED_PIN_9485 6 467#define ATH_LED_PIN_9485 6
465#define ATH_LED_PIN_9462 0 468#define ATH_LED_PIN_9462 4
466 469
467#ifdef CONFIG_MAC80211_LEDS 470#ifdef CONFIG_MAC80211_LEDS
468void ath_init_leds(struct ath_softc *sc); 471void ath_init_leds(struct ath_softc *sc);
@@ -542,7 +545,7 @@ struct ath_ant_comb {
542#define DEFAULT_CACHELINE 32 545#define DEFAULT_CACHELINE 32
543#define ATH_REGCLASSIDS_MAX 10 546#define ATH_REGCLASSIDS_MAX 10
544#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ 547#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
545#define ATH_MAX_SW_RETRIES 10 548#define ATH_MAX_SW_RETRIES 30
546#define ATH_CHAN_MAX 255 549#define ATH_CHAN_MAX 255
547 550
548#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 551#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
@@ -647,6 +650,7 @@ struct ath_softc {
647 struct delayed_work tx_complete_work; 650 struct delayed_work tx_complete_work;
648 struct delayed_work hw_pll_work; 651 struct delayed_work hw_pll_work;
649 struct ath_btcoex btcoex; 652 struct ath_btcoex btcoex;
653 struct ath_mci_coex mci_coex;
650 654
651 struct ath_descdma txsdma; 655 struct ath_descdma txsdma;
652 656
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 5a6361da9818..bbb20810ec10 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -14,13 +14,14 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/export.h>
17#include "hw.h" 18#include "hw.h"
18 19
19enum ath_bt_mode { 20enum ath_bt_mode {
20 ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */ 21 ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */
21 ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */ 22 ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */
22 ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */ 23 ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */
23 ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */ 24 ATH_BT_COEX_MODE_DISABLED, /* coexistence disabled */
24}; 25};
25 26
26struct ath_btcoex_config { 27struct ath_btcoex_config {
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index d5e5db1faad9..278361c867ca 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -54,8 +54,39 @@ enum ath_btcoex_scheme {
54 ATH_BTCOEX_CFG_MCI, 54 ATH_BTCOEX_CFG_MCI,
55}; 55};
56 56
57struct ath9k_hw_mci {
58 u32 raw_intr;
59 u32 rx_msg_intr;
60 u32 cont_status;
61 u32 gpm_addr;
62 u32 gpm_len;
63 u32 gpm_idx;
64 u32 sched_addr;
65 u32 wlan_channels[4];
66 u32 wlan_cal_seq;
67 u32 wlan_cal_done;
68 u32 config;
69 u8 *gpm_buf;
70 u8 *sched_buf;
71 bool ready;
72 bool update_2g5g;
73 bool is_2g;
74 bool query_bt;
75 bool unhalt_bt_gpm; /* need send UNHALT */
76 bool halted_bt_gpm; /* HALT sent */
77 bool need_flush_btinfo;
78 bool bt_version_known;
79 bool wlan_channels_update;
80 u8 wlan_ver_major;
81 u8 wlan_ver_minor;
82 u8 bt_ver_major;
83 u8 bt_ver_minor;
84 u8 bt_state;
85};
86
57struct ath_btcoex_hw { 87struct ath_btcoex_hw {
58 enum ath_btcoex_scheme scheme; 88 enum ath_btcoex_scheme scheme;
89 struct ath9k_hw_mci mci;
59 bool enabled; 90 bool enabled;
60 u8 wlanactive_gpio; 91 u8 wlanactive_gpio;
61 u8 btactive_gpio; 92 u8 btactive_gpio;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index ebaf304f464b..99538810a312 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -16,6 +16,7 @@
16 16
17#include "hw.h" 17#include "hw.h"
18#include "hw-ops.h" 18#include "hw-ops.h"
19#include <linux/export.h>
19 20
20/* Common calibration code */ 21/* Common calibration code */
21 22
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 8e7e57ccbe9a..68d972bf232d 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
19#include <linux/export.h>
19#include <asm/unaligned.h> 20#include <asm/unaligned.h>
20 21
21#include "ath9k.h" 22#include "ath9k.h"
@@ -855,7 +856,7 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
855 sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len; 856 sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
856 857
857 if (bf_isampdu(bf)) { 858 if (bf_isampdu(bf)) {
858 if (flags & ATH_TX_BAR) 859 if (flags & ATH_TX_ERROR)
859 TX_STAT_INC(qnum, a_xretries); 860 TX_STAT_INC(qnum, a_xretries);
860 else 861 else
861 TX_STAT_INC(qnum, a_completed); 862 TX_STAT_INC(qnum, a_completed);
@@ -1629,6 +1630,9 @@ int ath9k_init_debug(struct ath_hw *ah)
1629 debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1630 debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1630 sc, &fops_debug); 1631 sc, &fops_debug);
1631#endif 1632#endif
1633
1634 ath9k_dfs_init_debug(sc);
1635
1632 debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc, 1636 debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
1633 &fops_dma); 1637 &fops_dma);
1634 debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc, 1638 debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 356352ac2d6e..776a24ada600 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -19,6 +19,7 @@
19 19
20#include "hw.h" 20#include "hw.h"
21#include "rc.h" 21#include "rc.h"
22#include "dfs_debug.h"
22 23
23struct ath_txq; 24struct ath_txq;
24struct ath_buf; 25struct ath_buf;
@@ -187,6 +188,7 @@ struct ath_stats {
187 struct ath_interrupt_stats istats; 188 struct ath_interrupt_stats istats;
188 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES]; 189 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
189 struct ath_rx_stats rxstats; 190 struct ath_rx_stats rxstats;
191 struct ath_dfs_stats dfs_stats;
190 u32 reset[__RESET_TYPE_MAX]; 192 u32 reset[__RESET_TYPE_MAX];
191}; 193};
192 194
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
new file mode 100644
index 000000000000..e4e84a9e6273
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * Copyright (c) 2011 Neratec Solutions AG
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "hw.h"
19#include "hw-ops.h"
20#include "ath9k.h"
21#include "dfs.h"
22#include "dfs_debug.h"
23
24/*
25 * TODO: move into or synchronize this with generic header
26 * as soon as IF is defined
27 */
28struct dfs_radar_pulse {
29 u16 freq;
30 u64 ts;
31 u32 width;
32 u8 rssi;
33};
34
35/* internal struct to pass radar data */
36struct ath_radar_data {
37 u8 pulse_bw_info;
38 u8 rssi;
39 u8 ext_rssi;
40 u8 pulse_length_ext;
41 u8 pulse_length_pri;
42};
43
44/* convert pulse duration to usecs, considering clock mode */
45static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
46{
47 const u32 AR93X_NSECS_PER_DUR = 800;
48 const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11);
49 u32 nsecs;
50
51 if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan))
52 nsecs = dur * AR93X_NSECS_PER_DUR_FAST;
53 else
54 nsecs = dur * AR93X_NSECS_PER_DUR;
55
56 return (nsecs + 500) / 1000;
57}
58
59#define PRI_CH_RADAR_FOUND 0x01
60#define EXT_CH_RADAR_FOUND 0x02
61static bool
62ath9k_postprocess_radar_event(struct ath_softc *sc,
63 struct ath_radar_data *are,
64 struct dfs_radar_pulse *drp)
65{
66 u8 rssi;
67 u16 dur;
68
69 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_DFS,
70 "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
71 are->pulse_bw_info,
72 are->pulse_length_pri, are->rssi,
73 are->pulse_length_ext, are->ext_rssi);
74
75 /*
76 * Only the last 2 bits of the BW info are relevant, they indicate
77 * which channel the radar was detected in.
78 */
79 are->pulse_bw_info &= 0x03;
80
81 switch (are->pulse_bw_info) {
82 case PRI_CH_RADAR_FOUND:
83 /* radar in ctrl channel */
84 dur = are->pulse_length_pri;
85 DFS_STAT_INC(sc, pri_phy_errors);
86 /*
87 * cannot use ctrl channel RSSI
88 * if extension channel is stronger
89 */
90 rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi;
91 break;
92 case EXT_CH_RADAR_FOUND:
93 /* radar in extension channel */
94 dur = are->pulse_length_ext;
95 DFS_STAT_INC(sc, ext_phy_errors);
96 /*
97 * cannot use extension channel RSSI
98 * if control channel is stronger
99 */
100 rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi;
101 break;
102 case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
103 /*
104 * Conducted testing, when pulse is on DC, both pri and ext
105 * durations are reported to be same
106 *
107 * Radiated testing, when pulse is on DC, different pri and
108 * ext durations are reported, so take the larger of the two
109 */
110 if (are->pulse_length_ext >= are->pulse_length_pri)
111 dur = are->pulse_length_ext;
112 else
113 dur = are->pulse_length_pri;
114 DFS_STAT_INC(sc, dc_phy_errors);
115
116 /* when both are present use stronger one */
117 rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi;
118 break;
119 default:
120 /*
121 * Bogus bandwidth info was received in descriptor,
122 * so ignore this PHY error
123 */
124 DFS_STAT_INC(sc, bwinfo_discards);
125 return false;
126 }
127
128 if (rssi == 0) {
129 DFS_STAT_INC(sc, rssi_discards);
130 return false;
131 }
132
133 /*
134 * TODO: check chirping pulses
135 * checks for chirping are dependent on the DFS regulatory domain
136 * used, which is yet TBD
137 */
138
139 /* convert duration to usecs */
140 drp->width = dur_to_usecs(sc->sc_ah, dur);
141 drp->rssi = rssi;
142
143 DFS_STAT_INC(sc, pulses_detected);
144 return true;
145}
146#undef PRI_CH_RADAR_FOUND
147#undef EXT_CH_RADAR_FOUND
148
149/*
150 * DFS: check PHY-error for radar pulse and feed the detector
151 */
152void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
153 struct ath_rx_status *rs, u64 mactime)
154{
155 struct ath_radar_data ard;
156 u16 datalen;
157 char *vdata_end;
158 struct dfs_radar_pulse drp;
159 struct ath_hw *ah = sc->sc_ah;
160 struct ath_common *common = ath9k_hw_common(ah);
161
162 if ((!(rs->rs_phyerr != ATH9K_PHYERR_RADAR)) &&
163 (!(rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT))) {
164 ath_dbg(common, ATH_DBG_DFS,
165 "Error: rs_phyer=0x%x not a radar error\n",
166 rs->rs_phyerr);
167 return;
168 }
169
170 datalen = rs->rs_datalen;
171 if (datalen == 0) {
172 DFS_STAT_INC(sc, datalen_discards);
173 return;
174 }
175
176 ard.rssi = rs->rs_rssi_ctl0;
177 ard.ext_rssi = rs->rs_rssi_ext0;
178
179 /*
180 * hardware stores this as 8 bit signed value.
181 * we will cap it at 0 if it is a negative number
182 */
183 if (ard.rssi & 0x80)
184 ard.rssi = 0;
185 if (ard.ext_rssi & 0x80)
186 ard.ext_rssi = 0;
187
188 vdata_end = (char *)data + datalen;
189 ard.pulse_bw_info = vdata_end[-1];
190 ard.pulse_length_ext = vdata_end[-2];
191 ard.pulse_length_pri = vdata_end[-3];
192
193 ath_dbg(common, ATH_DBG_DFS,
194 "bw_info=%d, length_pri=%d, length_ext=%d, "
195 "rssi_pri=%d, rssi_ext=%d\n",
196 ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
197 ard.rssi, ard.ext_rssi);
198
199 drp.freq = ah->curchan->channel;
200 drp.ts = mactime;
201 if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
202 static u64 last_ts;
203 ath_dbg(common, ATH_DBG_DFS,
204 "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
205 "width=%d, rssi=%d, delta_ts=%llu\n",
206 drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts);
207 last_ts = drp.ts;
208 /*
209 * TODO: forward pulse to pattern detector
210 *
211 * ieee80211_add_radar_pulse(drp.freq, drp.ts,
212 * drp.width, drp.rssi);
213 */
214 }
215}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
new file mode 100644
index 000000000000..c2412857f122
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * Copyright (c) 2011 Neratec Solutions AG
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#ifndef ATH9K_DFS_H
19#define ATH9K_DFS_H
20
21#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
22/**
23 * ath9k_dfs_process_phyerr - process radar PHY error
24 * @sc: ath_softc
25 * @data: RX payload data
26 * @rs: RX status after processing descriptor
27 * @mactime: receive time
28 *
29 * This function is called whenever the HW DFS module detects a radar
30 * pulse and reports it as a PHY error.
31 *
32 * The radar information provided as raw payload data is validated and
33 * filtered for false pulses. Events passing all tests are forwarded to
34 * the upper layer for pattern detection.
35 */
36void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
37 struct ath_rx_status *rs, u64 mactime);
38#else
39static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
40 struct ath_rx_status *rs, u64 mactime) { }
41#endif
42
43#endif /* ATH9K_DFS_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
new file mode 100644
index 000000000000..106d031d834a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -0,0 +1,81 @@
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * Copyright (c) 2011 Neratec Solutions AG
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/debugfs.h>
19#include <linux/export.h>
20
21#include "ath9k.h"
22#include "dfs_debug.h"
23
24#define ATH9K_DFS_STAT(s, p) \
25 len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
26 sc->debug.stats.dfs_stats.p);
27
28static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
29 size_t count, loff_t *ppos)
30{
31 struct ath_softc *sc = file->private_data;
32 struct ath9k_hw_version *hw_ver = &sc->sc_ah->hw_version;
33 char *buf;
34 unsigned int len = 0, size = 8000;
35 ssize_t retval = 0;
36
37 buf = kzalloc(size, GFP_KERNEL);
38 if (buf == NULL)
39 return -ENOMEM;
40
41 len += snprintf(buf + len, size - len, "DFS support for "
42 "macVersion = 0x%x, macRev = 0x%x: %s\n",
43 hw_ver->macVersion, hw_ver->macRev,
44 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
45 "enabled" : "disabled");
46 ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
47 ATH9K_DFS_STAT("Datalen discards ", datalen_discards);
48 ATH9K_DFS_STAT("RSSI discards ", rssi_discards);
49 ATH9K_DFS_STAT("BW info discards ", bwinfo_discards);
50 ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
51 ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
52 ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
53
54 if (len > size)
55 len = size;
56
57 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
58 kfree(buf);
59
60 return retval;
61}
62
63static int ath9k_dfs_debugfs_open(struct inode *inode, struct file *file)
64{
65 file->private_data = inode->i_private;
66
67 return 0;
68}
69
70static const struct file_operations fops_dfs_stats = {
71 .read = read_file_dfs,
72 .open = ath9k_dfs_debugfs_open,
73 .owner = THIS_MODULE,
74 .llseek = default_llseek,
75};
76
77void ath9k_dfs_init_debug(struct ath_softc *sc)
78{
79 debugfs_create_file("dfs_stats", S_IRUSR,
80 sc->debug.debugfs_phy, sc, &fops_dfs_stats);
81}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
new file mode 100644
index 000000000000..6e1e2a71659e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -0,0 +1,57 @@
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 * Copyright (c) 2011 Neratec Solutions AG
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18
19#ifndef DFS_DEBUG_H
20#define DFS_DEBUG_H
21
22#include "hw.h"
23
24/**
25 * struct ath_dfs_stats - DFS Statistics
26 *
27 * @pulses_detected: No. of pulses detected so far
28 * @datalen_discards: No. of pulses discarded due to invalid datalen
29 * @rssi_discards: No. of pulses discarded due to invalid RSSI
30 * @bwinfo_discards: No. of pulses discarded due to invalid BW info
31 * @pri_phy_errors: No. of pulses reported for primary channel
32 * @ext_phy_errors: No. of pulses reported for extension channel
33 * @dc_phy_errors: No. of pulses reported for primary + extension channel
34 */
35struct ath_dfs_stats {
36 u32 pulses_detected;
37 u32 datalen_discards;
38 u32 rssi_discards;
39 u32 bwinfo_discards;
40 u32 pri_phy_errors;
41 u32 ext_phy_errors;
42 u32 dc_phy_errors;
43};
44
45#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
46
47#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
48void ath9k_dfs_init_debug(struct ath_softc *sc);
49
50#else
51
52#define DFS_STAT_INC(sc, c) do { } while (0)
53static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
54
55#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
56
57#endif /* DFS_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9a7520f987f0..61fcab0e2d76 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -473,7 +473,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
473 473
474 int i; 474 int i;
475 u16 twiceMinEdgePower; 475 u16 twiceMinEdgePower;
476 u16 twiceMaxEdgePower = MAX_RATE_POWER; 476 u16 twiceMaxEdgePower;
477 u16 scaledPower = 0, minCtlPower; 477 u16 scaledPower = 0, minCtlPower;
478 u16 numCtlModes; 478 u16 numCtlModes;
479 const u16 *pCtlMode; 479 const u16 *pCtlMode;
@@ -542,9 +542,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
542 else 542 else
543 freq = centers.ctl_center; 543 freq = centers.ctl_center;
544 544
545 if (ah->eep_ops->get_eeprom_ver(ah) == 14 && 545 twiceMaxEdgePower = MAX_RATE_POWER;
546 ah->eep_ops->get_eeprom_rev(ah) <= 2)
547 twiceMaxEdgePower = MAX_RATE_POWER;
548 546
549 for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && 547 for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
550 pEepData->ctlIndex[i]; i++) { 548 pEepData->ctlIndex[i]; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 4f5c50a87ce3..0981c073471d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -569,7 +569,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
569#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 569#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
570#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 570#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
571 571
572 u16 twiceMaxEdgePower = MAX_RATE_POWER; 572 u16 twiceMaxEdgePower;
573 int i; 573 int i;
574 struct cal_ctl_data_ar9287 *rep; 574 struct cal_ctl_data_ar9287 *rep;
575 struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} }, 575 struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
@@ -669,6 +669,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
669 else 669 else
670 freq = centers.ctl_center; 670 freq = centers.ctl_center;
671 671
672 twiceMaxEdgePower = MAX_RATE_POWER;
672 /* Walk through the CTL indices stored in EEPROM */ 673 /* Walk through the CTL indices stored in EEPROM */
673 for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { 674 for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
674 struct cal_ctl_edges *pRdEdgesPower; 675 struct cal_ctl_edges *pRdEdgesPower;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 81e629671679..55a21d39167c 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -1000,7 +1000,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
1000#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */ 1000#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
1001 1001
1002 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; 1002 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
1003 u16 twiceMaxEdgePower = MAX_RATE_POWER; 1003 u16 twiceMaxEdgePower;
1004 int i; 1004 int i;
1005 struct cal_ctl_data *rep; 1005 struct cal_ctl_data *rep;
1006 struct cal_target_power_leg targetPowerOfdm, targetPowerCck = { 1006 struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
@@ -1121,9 +1121,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
1121 else 1121 else
1122 freq = centers.ctl_center; 1122 freq = centers.ctl_center;
1123 1123
1124 if (ah->eep_ops->get_eeprom_ver(ah) == 14 && 1124 twiceMaxEdgePower = MAX_RATE_POWER;
1125 ah->eep_ops->get_eeprom_rev(ah) <= 2)
1126 twiceMaxEdgePower = MAX_RATE_POWER;
1127 1125
1128 for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { 1126 for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
1129 if ((((cfgCtl & ~CTL_MODE_M) | 1127 if ((((cfgCtl & ~CTL_MODE_M) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 0b9a0e8a4958..f8ce4ea6f65c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -808,7 +808,8 @@ void ath9k_htc_ani_work(struct work_struct *work)
808 } 808 }
809 809
810 /* Verify whether we must check ANI */ 810 /* Verify whether we must check ANI */
811 if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { 811 if (ah->config.enable_ani &&
812 (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
812 aniflag = true; 813 aniflag = true;
813 common->ani.checkani_timer = timestamp; 814 common->ani.checkani_timer = timestamp;
814 } 815 }
@@ -838,7 +839,7 @@ set_timer:
838 * short calibration and long calibration. 839 * short calibration and long calibration.
839 */ 840 */
840 cal_interval = ATH_LONG_CALINTERVAL; 841 cal_interval = ATH_LONG_CALINTERVAL;
841 if (priv->ah->config.enable_ani) 842 if (ah->config.enable_ani)
842 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); 843 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
843 if (!common->ani.caldone) 844 if (!common->ani.caldone)
844 cal_interval = min(cal_interval, (u32)short_cal_interval); 845 cal_interval = min(cal_interval, (u32)short_cal_interval);
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index e74c233757a2..c4ad0b06bdbc 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -212,4 +212,13 @@ static inline int ath9k_hw_fast_chan_change(struct ath_hw *ah,
212 return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan, 212 return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan,
213 ini_reloaded); 213 ini_reloaded);
214} 214}
215
216static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
217{
218 if (!ath9k_hw_private_ops(ah)->set_radar_params)
219 return;
220
221 ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
222}
223
215#endif /* ATH9K_HW_OPS_H */ 224#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 27471f80d8b2..8cda9a1513a7 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/module.h>
19#include <asm/unaligned.h> 20#include <asm/unaligned.h>
20 21
21#include "hw.h" 22#include "hw.h"
@@ -503,7 +504,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
503 return ecode; 504 return ecode;
504 } 505 }
505 506
506 if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) { 507 if (ah->config.enable_ani) {
507 ath9k_hw_ani_setup(ah); 508 ath9k_hw_ani_setup(ah);
508 ath9k_hw_ani_init(ah); 509 ath9k_hw_ani_init(ah);
509 } 510 }
@@ -609,6 +610,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
609 if (!AR_SREV_9300_20_OR_LATER(ah)) 610 if (!AR_SREV_9300_20_OR_LATER(ah))
610 ah->ani_function &= ~ATH9K_ANI_MRC_CCK; 611 ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
611 612
613 /* disable ANI for 9340 */
614 if (AR_SREV_9340(ah))
615 ah->config.enable_ani = false;
616
612 ath9k_hw_init_mode_regs(ah); 617 ath9k_hw_init_mode_regs(ah);
613 618
614 if (!ah->is_pciexpress) 619 if (!ah->is_pciexpress)
@@ -1349,6 +1354,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1349 1354
1350static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type) 1355static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1351{ 1356{
1357 bool ret = false;
1352 1358
1353 if (AR_SREV_9300_20_OR_LATER(ah)) { 1359 if (AR_SREV_9300_20_OR_LATER(ah)) {
1354 REG_WRITE(ah, AR_WA, ah->WARegVal); 1360 REG_WRITE(ah, AR_WA, ah->WARegVal);
@@ -1360,13 +1366,20 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1360 1366
1361 switch (type) { 1367 switch (type) {
1362 case ATH9K_RESET_POWER_ON: 1368 case ATH9K_RESET_POWER_ON:
1363 return ath9k_hw_set_reset_power_on(ah); 1369 ret = ath9k_hw_set_reset_power_on(ah);
1370 break;
1364 case ATH9K_RESET_WARM: 1371 case ATH9K_RESET_WARM:
1365 case ATH9K_RESET_COLD: 1372 case ATH9K_RESET_COLD:
1366 return ath9k_hw_set_reset(ah, type); 1373 ret = ath9k_hw_set_reset(ah, type);
1374 break;
1367 default: 1375 default:
1368 return false; 1376 break;
1369 } 1377 }
1378
1379 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
1380 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
1381
1382 return ret;
1370} 1383}
1371 1384
1372static bool ath9k_hw_chip_reset(struct ath_hw *ah, 1385static bool ath9k_hw_chip_reset(struct ath_hw *ah,
@@ -1505,6 +1518,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1505 struct ath9k_hw_cal_data *caldata, bool bChannelChange) 1518 struct ath9k_hw_cal_data *caldata, bool bChannelChange)
1506{ 1519{
1507 struct ath_common *common = ath9k_hw_common(ah); 1520 struct ath_common *common = ath9k_hw_common(ah);
1521 struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
1508 u32 saveLedState; 1522 u32 saveLedState;
1509 struct ath9k_channel *curchan = ah->curchan; 1523 struct ath9k_channel *curchan = ah->curchan;
1510 u32 saveDefAntenna; 1524 u32 saveDefAntenna;
@@ -1512,6 +1526,53 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1512 u64 tsf = 0; 1526 u64 tsf = 0;
1513 int i, r; 1527 int i, r;
1514 bool allow_fbs = false; 1528 bool allow_fbs = false;
1529 bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
1530 bool save_fullsleep = ah->chip_fullsleep;
1531
1532 if (mci) {
1533
1534 ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
1535
1536 if (mci_hw->bt_state == MCI_BT_CAL_START) {
1537 u32 payload[4] = {0, 0, 0, 0};
1538
1539 ath_dbg(common, ATH_DBG_MCI, "MCI stop rx for BT CAL");
1540
1541 mci_hw->bt_state = MCI_BT_CAL;
1542
1543 /*
1544 * MCI FIX: disable mci interrupt here. This is to avoid
1545 * SW_MSG_DONE or RX_MSG bits to trigger MCI_INT and
1546 * lead to mci_intr reentry.
1547 */
1548
1549 ar9003_mci_disable_interrupt(ah);
1550
1551 ath_dbg(common, ATH_DBG_MCI, "send WLAN_CAL_GRANT");
1552 MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
1553 ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
1554 16, true, false);
1555
1556 ath_dbg(common, ATH_DBG_MCI, "\nMCI BT is calibrating");
1557
1558 /* Wait BT calibration to be completed for 25ms */
1559
1560 if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
1561 0, 25000))
1562 ath_dbg(common, ATH_DBG_MCI,
1563 "MCI got BT_CAL_DONE\n");
1564 else
1565 ath_dbg(common, ATH_DBG_MCI,
1566 "MCI ### BT cal takes to long, force"
1567 "bt_state to be bt_awake\n");
1568 mci_hw->bt_state = MCI_BT_AWAKE;
1569 /* MCI FIX: enable mci interrupt here */
1570 ar9003_mci_enable_interrupt(ah);
1571
1572 return true;
1573 }
1574 }
1575
1515 1576
1516 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1577 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
1517 return -EIO; 1578 return -EIO;
@@ -1549,12 +1610,29 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1549 if (ath9k_hw_channel_change(ah, chan)) { 1610 if (ath9k_hw_channel_change(ah, chan)) {
1550 ath9k_hw_loadnf(ah, ah->curchan); 1611 ath9k_hw_loadnf(ah, ah->curchan);
1551 ath9k_hw_start_nfcal(ah, true); 1612 ath9k_hw_start_nfcal(ah, true);
1613 if (mci && mci_hw->ready)
1614 ar9003_mci_2g5g_switch(ah, true);
1615
1552 if (AR_SREV_9271(ah)) 1616 if (AR_SREV_9271(ah))
1553 ar9002_hw_load_ani_reg(ah, chan); 1617 ar9002_hw_load_ani_reg(ah, chan);
1554 return 0; 1618 return 0;
1555 } 1619 }
1556 } 1620 }
1557 1621
1622 if (mci) {
1623 ar9003_mci_disable_interrupt(ah);
1624
1625 if (mci_hw->ready && !save_fullsleep) {
1626 ar9003_mci_mute_bt(ah);
1627 udelay(20);
1628 REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
1629 }
1630
1631 mci_hw->bt_state = MCI_BT_SLEEP;
1632 mci_hw->ready = false;
1633 }
1634
1635
1558 saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA); 1636 saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
1559 if (saveDefAntenna == 0) 1637 if (saveDefAntenna == 0)
1560 saveDefAntenna = 1; 1638 saveDefAntenna = 1;
@@ -1610,6 +1688,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1610 if (r) 1688 if (r)
1611 return r; 1689 return r;
1612 1690
1691 if (mci)
1692 ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
1693
1613 /* 1694 /*
1614 * Some AR91xx SoC devices frequently fail to accept TSF writes 1695 * Some AR91xx SoC devices frequently fail to accept TSF writes
1615 * right after the chip reset. When that happens, write a new 1696 * right after the chip reset. When that happens, write a new
@@ -1727,6 +1808,55 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1727 ath9k_hw_loadnf(ah, chan); 1808 ath9k_hw_loadnf(ah, chan);
1728 ath9k_hw_start_nfcal(ah, true); 1809 ath9k_hw_start_nfcal(ah, true);
1729 1810
1811 if (mci && mci_hw->ready) {
1812
1813 if (IS_CHAN_2GHZ(chan) &&
1814 (mci_hw->bt_state == MCI_BT_SLEEP)) {
1815
1816 if (ar9003_mci_check_int(ah,
1817 AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
1818 ar9003_mci_check_int(ah,
1819 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
1820
1821 /*
1822 * BT is sleeping. Check if BT wakes up during
1823 * WLAN calibration. If BT wakes up during
1824 * WLAN calibration, need to go through all
1825 * message exchanges again and recal.
1826 */
1827
1828 ath_dbg(common, ATH_DBG_MCI, "MCI BT wakes up"
1829 "during WLAN calibration\n");
1830
1831 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
1832 AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
1833 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
1834 ath_dbg(common, ATH_DBG_MCI, "MCI send"
1835 "REMOTE_RESET\n");
1836 ar9003_mci_remote_reset(ah, true);
1837 ar9003_mci_send_sys_waking(ah, true);
1838 udelay(1);
1839 if (IS_CHAN_2GHZ(chan))
1840 ar9003_mci_send_lna_transfer(ah, true);
1841
1842 mci_hw->bt_state = MCI_BT_AWAKE;
1843
1844 ath_dbg(common, ATH_DBG_MCI, "MCI re-cal\n");
1845
1846 if (caldata) {
1847 caldata->done_txiqcal_once = false;
1848 caldata->done_txclcal_once = false;
1849 caldata->rtt_hist.num_readings = 0;
1850 }
1851
1852 if (!ath9k_hw_init_cal(ah, chan))
1853 return -EIO;
1854
1855 }
1856 }
1857 ar9003_mci_enable_interrupt(ah);
1858 }
1859
1730 ENABLE_REGWRITE_BUFFER(ah); 1860 ENABLE_REGWRITE_BUFFER(ah);
1731 1861
1732 ath9k_hw_restore_chainmask(ah); 1862 ath9k_hw_restore_chainmask(ah);
@@ -1769,6 +1899,21 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1769 if (ah->btcoex_hw.enabled) 1899 if (ah->btcoex_hw.enabled)
1770 ath9k_hw_btcoex_enable(ah); 1900 ath9k_hw_btcoex_enable(ah);
1771 1901
1902 if (mci && mci_hw->ready) {
1903 /*
1904 * check BT state again to make
1905 * sure it's not changed.
1906 */
1907
1908 ar9003_mci_sync_bt_state(ah);
1909 ar9003_mci_2g5g_switch(ah, true);
1910
1911 if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
1912 (mci_hw->query_bt == true)) {
1913 mci_hw->need_flush_btinfo = true;
1914 }
1915 }
1916
1772 if (AR_SREV_9300_20_OR_LATER(ah)) { 1917 if (AR_SREV_9300_20_OR_LATER(ah)) {
1773 ar9003_hw_bb_watchdog_config(ah); 1918 ar9003_hw_bb_watchdog_config(ah);
1774 1919
@@ -1826,7 +1971,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
1826 } 1971 }
1827 1972
1828 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */ 1973 /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
1829 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); 1974 if (AR_SREV_9300_20_OR_LATER(ah))
1975 REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
1830} 1976}
1831 1977
1832/* 1978/*
@@ -1932,6 +2078,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
1932bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode) 2078bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
1933{ 2079{
1934 struct ath_common *common = ath9k_hw_common(ah); 2080 struct ath_common *common = ath9k_hw_common(ah);
2081 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1935 int status = true, setChip = true; 2082 int status = true, setChip = true;
1936 static const char *modes[] = { 2083 static const char *modes[] = {
1937 "AWAKE", 2084 "AWAKE",
@@ -1949,12 +2096,35 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
1949 switch (mode) { 2096 switch (mode) {
1950 case ATH9K_PM_AWAKE: 2097 case ATH9K_PM_AWAKE:
1951 status = ath9k_hw_set_power_awake(ah, setChip); 2098 status = ath9k_hw_set_power_awake(ah, setChip);
2099
2100 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
2101 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
2102
1952 break; 2103 break;
1953 case ATH9K_PM_FULL_SLEEP: 2104 case ATH9K_PM_FULL_SLEEP:
2105
2106 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
2107 if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
2108 (mci->bt_state != MCI_BT_SLEEP) &&
2109 !mci->halted_bt_gpm) {
2110 ath_dbg(common, ATH_DBG_MCI, "MCI halt BT GPM"
2111 "(full_sleep)");
2112 ar9003_mci_send_coex_halt_bt_gpm(ah,
2113 true, true);
2114 }
2115
2116 mci->ready = false;
2117 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
2118 }
2119
1954 ath9k_set_power_sleep(ah, setChip); 2120 ath9k_set_power_sleep(ah, setChip);
1955 ah->chip_fullsleep = true; 2121 ah->chip_fullsleep = true;
1956 break; 2122 break;
1957 case ATH9K_PM_NETWORK_SLEEP: 2123 case ATH9K_PM_NETWORK_SLEEP:
2124
2125 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
2126 REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
2127
1958 ath9k_set_power_network_sleep(ah, setChip); 2128 ath9k_set_power_network_sleep(ah, setChip);
1959 break; 2129 break;
1960 default: 2130 default:
@@ -2107,6 +2277,30 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
2107 return chip_chainmask; 2277 return chip_chainmask;
2108} 2278}
2109 2279
2280/**
2281 * ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset
2282 * @ah: the atheros hardware data structure
2283 *
2284 * We enable DFS support upstream on chipsets which have passed a series
2285 * of tests. The testing requirements are going to be documented. Desired
2286 * test requirements are documented at:
2287 *
2288 * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
2289 *
2290 * Once a new chipset gets properly tested an individual commit can be used
2291 * to document the testing for DFS for that chipset.
2292 */
2293static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
2294{
2295
2296 switch (ah->hw_version.macVersion) {
2297 /* AR9580 will likely be our first target to get testing on */
2298 case AR_SREV_VERSION_9580:
2299 default:
2300 return false;
2301 }
2302}
2303
2110int ath9k_hw_fill_cap_info(struct ath_hw *ah) 2304int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2111{ 2305{
2112 struct ath9k_hw_capabilities *pCap = &ah->caps; 2306 struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2147,6 +2341,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2147 2341
2148 if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah)) 2342 if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
2149 chip_chainmask = 1; 2343 chip_chainmask = 1;
2344 else if (AR_SREV_9462(ah))
2345 chip_chainmask = 3;
2150 else if (!AR_SREV_9280_20_OR_LATER(ah)) 2346 else if (!AR_SREV_9280_20_OR_LATER(ah))
2151 chip_chainmask = 7; 2347 chip_chainmask = 7;
2152 else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah)) 2348 else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
@@ -2203,12 +2399,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2203 else 2399 else
2204 pCap->num_gpio_pins = AR_NUM_GPIO; 2400 pCap->num_gpio_pins = AR_NUM_GPIO;
2205 2401
2206 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) { 2402 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
2207 pCap->hw_caps |= ATH9K_HW_CAP_CST;
2208 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; 2403 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
2209 } else { 2404 else
2210 pCap->rts_aggr_limit = (8 * 1024); 2405 pCap->rts_aggr_limit = (8 * 1024);
2211 }
2212 2406
2213#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 2407#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2214 ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT); 2408 ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
@@ -2232,7 +2426,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2232 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS; 2426 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
2233 2427
2234 if (common->btcoex_enabled) { 2428 if (common->btcoex_enabled) {
2235 if (AR_SREV_9300_20_OR_LATER(ah)) { 2429 if (AR_SREV_9462(ah))
2430 btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
2431 else if (AR_SREV_9300_20_OR_LATER(ah)) {
2236 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; 2432 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
2237 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300; 2433 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
2238 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300; 2434 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -2316,6 +2512,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2316 pCap->pcie_lcr_offset = 0x80; 2512 pCap->pcie_lcr_offset = 0x80;
2317 } 2513 }
2318 2514
2515 if (ath9k_hw_dfs_tested(ah))
2516 pCap->hw_caps |= ATH9K_HW_CAP_DFS;
2517
2319 tx_chainmask = pCap->tx_chainmask; 2518 tx_chainmask = pCap->tx_chainmask;
2320 rx_chainmask = pCap->rx_chainmask; 2519 rx_chainmask = pCap->rx_chainmask;
2321 while (tx_chainmask || rx_chainmask) { 2520 while (tx_chainmask || rx_chainmask) {
@@ -2330,7 +2529,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2330 2529
2331 if (AR_SREV_9300_20_OR_LATER(ah)) { 2530 if (AR_SREV_9300_20_OR_LATER(ah)) {
2332 ah->enabled_cals |= TX_IQ_CAL; 2531 ah->enabled_cals |= TX_IQ_CAL;
2333 if (!AR_SREV_9330(ah)) 2532 if (AR_SREV_9485_OR_LATER(ah))
2334 ah->enabled_cals |= TX_IQ_ON_AGC_CAL; 2533 ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
2335 } 2534 }
2336 if (AR_SREV_9462(ah)) 2535 if (AR_SREV_9462(ah))
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 3cb878c28ccf..615cc839f0de 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -126,6 +126,16 @@
126#define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL 4 126#define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL 4
127#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5 127#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
128#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6 128#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
129#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA 0x16
130#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK 0x17
131#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA 0x18
132#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK 0x19
133#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX 0x14
134#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX 0x13
135#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX 9
136#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX 8
137#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE 0x1d
138#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e
129 139
130#define AR_GPIOD_MASK 0x00001FFF 140#define AR_GPIOD_MASK 0x00001FFF
131#define AR_GPIO_BIT(_gpio) (1 << (_gpio)) 141#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
@@ -186,21 +196,21 @@ enum ath_ini_subsys {
186enum ath9k_hw_caps { 196enum ath9k_hw_caps {
187 ATH9K_HW_CAP_HT = BIT(0), 197 ATH9K_HW_CAP_HT = BIT(0),
188 ATH9K_HW_CAP_RFSILENT = BIT(1), 198 ATH9K_HW_CAP_RFSILENT = BIT(1),
189 ATH9K_HW_CAP_CST = BIT(2), 199 ATH9K_HW_CAP_AUTOSLEEP = BIT(2),
190 ATH9K_HW_CAP_AUTOSLEEP = BIT(4), 200 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(3),
191 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5), 201 ATH9K_HW_CAP_EDMA = BIT(4),
192 ATH9K_HW_CAP_EDMA = BIT(6), 202 ATH9K_HW_CAP_RAC_SUPPORTED = BIT(5),
193 ATH9K_HW_CAP_RAC_SUPPORTED = BIT(7), 203 ATH9K_HW_CAP_LDPC = BIT(6),
194 ATH9K_HW_CAP_LDPC = BIT(8), 204 ATH9K_HW_CAP_FASTCLOCK = BIT(7),
195 ATH9K_HW_CAP_FASTCLOCK = BIT(9), 205 ATH9K_HW_CAP_SGI_20 = BIT(8),
196 ATH9K_HW_CAP_SGI_20 = BIT(10), 206 ATH9K_HW_CAP_PAPRD = BIT(9),
197 ATH9K_HW_CAP_PAPRD = BIT(11), 207 ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10),
198 ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12), 208 ATH9K_HW_CAP_2GHZ = BIT(11),
199 ATH9K_HW_CAP_2GHZ = BIT(13), 209 ATH9K_HW_CAP_5GHZ = BIT(12),
200 ATH9K_HW_CAP_5GHZ = BIT(14), 210 ATH9K_HW_CAP_APM = BIT(13),
201 ATH9K_HW_CAP_APM = BIT(15), 211 ATH9K_HW_CAP_RTT = BIT(14),
202 ATH9K_HW_CAP_RTT = BIT(16), 212 ATH9K_HW_CAP_MCI = BIT(15),
203 ATH9K_HW_CAP_MCI = BIT(17), 213 ATH9K_HW_CAP_DFS = BIT(16),
204}; 214};
205 215
206struct ath9k_hw_capabilities { 216struct ath9k_hw_capabilities {
@@ -266,6 +276,7 @@ enum ath9k_int {
266 ATH9K_INT_TX = 0x00000040, 276 ATH9K_INT_TX = 0x00000040,
267 ATH9K_INT_TXDESC = 0x00000080, 277 ATH9K_INT_TXDESC = 0x00000080,
268 ATH9K_INT_TIM_TIMER = 0x00000100, 278 ATH9K_INT_TIM_TIMER = 0x00000100,
279 ATH9K_INT_MCI = 0x00000200,
269 ATH9K_INT_BB_WATCHDOG = 0x00000400, 280 ATH9K_INT_BB_WATCHDOG = 0x00000400,
270 ATH9K_INT_TXURN = 0x00000800, 281 ATH9K_INT_TXURN = 0x00000800,
271 ATH9K_INT_MIB = 0x00001000, 282 ATH9K_INT_MIB = 0x00001000,
@@ -417,6 +428,25 @@ enum ath9k_rx_qtype {
417 ATH9K_RX_QUEUE_MAX, 428 ATH9K_RX_QUEUE_MAX,
418}; 429};
419 430
431enum mci_message_header { /* length of payload */
432 MCI_LNA_CTRL = 0x10, /* len = 0 */
433 MCI_CONT_NACK = 0x20, /* len = 0 */
434 MCI_CONT_INFO = 0x30, /* len = 4 */
435 MCI_CONT_RST = 0x40, /* len = 0 */
436 MCI_SCHD_INFO = 0x50, /* len = 16 */
437 MCI_CPU_INT = 0x60, /* len = 4 */
438 MCI_SYS_WAKING = 0x70, /* len = 0 */
439 MCI_GPM = 0x80, /* len = 16 */
440 MCI_LNA_INFO = 0x90, /* len = 1 */
441 MCI_LNA_STATE = 0x94,
442 MCI_LNA_TAKE = 0x98,
443 MCI_LNA_TRANS = 0x9c,
444 MCI_SYS_SLEEPING = 0xa0, /* len = 0 */
445 MCI_REQ_WAKE = 0xc0, /* len = 0 */
446 MCI_DEBUG_16 = 0xfe, /* len = 2 */
447 MCI_REMOTE_RESET = 0xff /* len = 16 */
448};
449
420enum ath_mci_gpm_coex_profile_type { 450enum ath_mci_gpm_coex_profile_type {
421 MCI_GPM_COEX_PROFILE_UNKNOWN, 451 MCI_GPM_COEX_PROFILE_UNKNOWN,
422 MCI_GPM_COEX_PROFILE_RFCOMM, 452 MCI_GPM_COEX_PROFILE_RFCOMM,
@@ -427,6 +457,132 @@ enum ath_mci_gpm_coex_profile_type {
427 MCI_GPM_COEX_PROFILE_MAX 457 MCI_GPM_COEX_PROFILE_MAX
428}; 458};
429 459
460/* MCI GPM/Coex opcode/type definitions */
461enum {
462 MCI_GPM_COEX_W_GPM_PAYLOAD = 1,
463 MCI_GPM_COEX_B_GPM_TYPE = 4,
464 MCI_GPM_COEX_B_GPM_OPCODE = 5,
465 /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
466 MCI_GPM_WLAN_CAL_W_SEQUENCE = 2,
467
468 /* MCI_GPM_COEX_VERSION_QUERY */
469 /* MCI_GPM_COEX_VERSION_RESPONSE */
470 MCI_GPM_COEX_B_MAJOR_VERSION = 6,
471 MCI_GPM_COEX_B_MINOR_VERSION = 7,
472 /* MCI_GPM_COEX_STATUS_QUERY */
473 MCI_GPM_COEX_B_BT_BITMAP = 6,
474 MCI_GPM_COEX_B_WLAN_BITMAP = 7,
475 /* MCI_GPM_COEX_HALT_BT_GPM */
476 MCI_GPM_COEX_B_HALT_STATE = 6,
477 /* MCI_GPM_COEX_WLAN_CHANNELS */
478 MCI_GPM_COEX_B_CHANNEL_MAP = 6,
479 /* MCI_GPM_COEX_BT_PROFILE_INFO */
480 MCI_GPM_COEX_B_PROFILE_TYPE = 6,
481 MCI_GPM_COEX_B_PROFILE_LINKID = 7,
482 MCI_GPM_COEX_B_PROFILE_STATE = 8,
483 MCI_GPM_COEX_B_PROFILE_ROLE = 9,
484 MCI_GPM_COEX_B_PROFILE_RATE = 10,
485 MCI_GPM_COEX_B_PROFILE_VOTYPE = 11,
486 MCI_GPM_COEX_H_PROFILE_T = 12,
487 MCI_GPM_COEX_B_PROFILE_W = 14,
488 MCI_GPM_COEX_B_PROFILE_A = 15,
489 /* MCI_GPM_COEX_BT_STATUS_UPDATE */
490 MCI_GPM_COEX_B_STATUS_TYPE = 6,
491 MCI_GPM_COEX_B_STATUS_LINKID = 7,
492 MCI_GPM_COEX_B_STATUS_STATE = 8,
493 /* MCI_GPM_COEX_BT_UPDATE_FLAGS */
494 MCI_GPM_COEX_W_BT_FLAGS = 6,
495 MCI_GPM_COEX_B_BT_FLAGS_OP = 10
496};
497
498enum mci_gpm_subtype {
499 MCI_GPM_BT_CAL_REQ = 0,
500 MCI_GPM_BT_CAL_GRANT = 1,
501 MCI_GPM_BT_CAL_DONE = 2,
502 MCI_GPM_WLAN_CAL_REQ = 3,
503 MCI_GPM_WLAN_CAL_GRANT = 4,
504 MCI_GPM_WLAN_CAL_DONE = 5,
505 MCI_GPM_COEX_AGENT = 0x0c,
506 MCI_GPM_RSVD_PATTERN = 0xfe,
507 MCI_GPM_RSVD_PATTERN32 = 0xfefefefe,
508 MCI_GPM_BT_DEBUG = 0xff
509};
510
511enum mci_bt_state {
512 MCI_BT_SLEEP,
513 MCI_BT_AWAKE,
514 MCI_BT_CAL_START,
515 MCI_BT_CAL
516};
517
518/* Type of state query */
519enum mci_state_type {
520 MCI_STATE_ENABLE,
521 MCI_STATE_INIT_GPM_OFFSET,
522 MCI_STATE_NEXT_GPM_OFFSET,
523 MCI_STATE_LAST_GPM_OFFSET,
524 MCI_STATE_BT,
525 MCI_STATE_SET_BT_SLEEP,
526 MCI_STATE_SET_BT_AWAKE,
527 MCI_STATE_SET_BT_CAL_START,
528 MCI_STATE_SET_BT_CAL,
529 MCI_STATE_LAST_SCHD_MSG_OFFSET,
530 MCI_STATE_REMOTE_SLEEP,
531 MCI_STATE_CONT_RSSI_POWER,
532 MCI_STATE_CONT_PRIORITY,
533 MCI_STATE_CONT_TXRX,
534 MCI_STATE_RESET_REQ_WAKE,
535 MCI_STATE_SEND_WLAN_COEX_VERSION,
536 MCI_STATE_SET_BT_COEX_VERSION,
537 MCI_STATE_SEND_WLAN_CHANNELS,
538 MCI_STATE_SEND_VERSION_QUERY,
539 MCI_STATE_SEND_STATUS_QUERY,
540 MCI_STATE_NEED_FLUSH_BT_INFO,
541 MCI_STATE_SET_CONCUR_TX_PRI,
542 MCI_STATE_RECOVER_RX,
543 MCI_STATE_NEED_FTP_STOMP,
544 MCI_STATE_NEED_TUNING,
545 MCI_STATE_DEBUG,
546 MCI_STATE_MAX
547};
548
549enum mci_gpm_coex_opcode {
550 MCI_GPM_COEX_VERSION_QUERY,
551 MCI_GPM_COEX_VERSION_RESPONSE,
552 MCI_GPM_COEX_STATUS_QUERY,
553 MCI_GPM_COEX_HALT_BT_GPM,
554 MCI_GPM_COEX_WLAN_CHANNELS,
555 MCI_GPM_COEX_BT_PROFILE_INFO,
556 MCI_GPM_COEX_BT_STATUS_UPDATE,
557 MCI_GPM_COEX_BT_UPDATE_FLAGS
558};
559
560#define MCI_GPM_NOMORE 0
561#define MCI_GPM_MORE 1
562#define MCI_GPM_INVALID 0xffffffff
563
564#define MCI_GPM_RECYCLE(_p_gpm) do { \
565 *(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
566 MCI_GPM_RSVD_PATTERN32; \
567} while (0)
568
569#define MCI_GPM_TYPE(_p_gpm) \
570 (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
571
572#define MCI_GPM_OPCODE(_p_gpm) \
573 (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
574
575#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type) do { \
576 *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
577} while (0)
578
579#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do { \
580 *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff; \
581 *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
582} while (0)
583
584#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
585
430struct ath9k_beacon_state { 586struct ath9k_beacon_state {
431 u32 bs_nexttbtt; 587 u32 bs_nexttbtt;
432 u32 bs_nextdtim; 588 u32 bs_nextdtim;
@@ -954,7 +1110,6 @@ bool ath9k_hw_disable(struct ath_hw *ah);
954void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test); 1110void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
955void ath9k_hw_setopmode(struct ath_hw *ah); 1111void ath9k_hw_setopmode(struct ath_hw *ah);
956void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1); 1112void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
957void ath9k_hw_setbssidmask(struct ath_hw *ah);
958void ath9k_hw_write_associd(struct ath_hw *ah); 1113void ath9k_hw_write_associd(struct ath_hw *ah);
959u32 ath9k_hw_gettsf32(struct ath_hw *ah); 1114u32 ath9k_hw_gettsf32(struct ath_hw *ah);
960u64 ath9k_hw_gettsf64(struct ath_hw *ah); 1115u64 ath9k_hw_gettsf64(struct ath_hw *ah);
@@ -1047,6 +1202,32 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
1047void ath9k_hw_proc_mib_event(struct ath_hw *ah); 1202void ath9k_hw_proc_mib_event(struct ath_hw *ah);
1048void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan); 1203void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
1049 1204
1205bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
1206 u32 *payload, u8 len, bool wait_done,
1207 bool check_bt);
1208void ar9003_mci_mute_bt(struct ath_hw *ah);
1209u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
1210void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
1211 u16 len, u32 sched_addr);
1212void ar9003_mci_cleanup(struct ath_hw *ah);
1213void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
1214 bool wait_done);
1215u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
1216 u8 gpm_opcode, int time_out);
1217void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g);
1218void ar9003_mci_disable_interrupt(struct ath_hw *ah);
1219void ar9003_mci_enable_interrupt(struct ath_hw *ah);
1220void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
1221void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
1222 bool is_full_sleep);
1223bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints);
1224void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done);
1225void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done);
1226void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done);
1227void ar9003_mci_sync_bt_state(struct ath_hw *ah);
1228void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
1229 u32 *rx_msg_intr);
1230
1050#define ATH9K_CLOCK_RATE_CCK 22 1231#define ATH9K_CLOCK_RATE_CCK 22
1051#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40 1232#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
1052#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 1233#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 5cb0599b01c2..c5df98139c4d 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -17,6 +17,7 @@
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/ath9k_platform.h> 19#include <linux/ath9k_platform.h>
20#include <linux/module.h>
20 21
21#include "ath9k.h" 22#include "ath9k.h"
22 23
@@ -257,6 +258,8 @@ static void setup_ht_cap(struct ath_softc *sc,
257 258
258 if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) 259 if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
259 max_streams = 1; 260 max_streams = 1;
261 else if (AR_SREV_9462(ah))
262 max_streams = 2;
260 else if (AR_SREV_9300_20_OR_LATER(ah)) 263 else if (AR_SREV_9300_20_OR_LATER(ah))
261 max_streams = 3; 264 max_streams = 3;
262 else 265 else
@@ -294,9 +297,22 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
294{ 297{
295 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 298 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
296 struct ath_softc *sc = hw->priv; 299 struct ath_softc *sc = hw->priv;
297 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah); 300 struct ath_hw *ah = sc->sc_ah;
301 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
302 int ret;
303
304 ret = ath_reg_notifier_apply(wiphy, request, reg);
305
306 /* Set tx power */
307 if (ah->curchan) {
308 sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
309 ath9k_ps_wakeup(sc);
310 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
311 sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
312 ath9k_ps_restore(sc);
313 }
298 314
299 return ath_reg_notifier_apply(wiphy, request, reg); 315 return ret;
300} 316}
301 317
302/* 318/*
@@ -407,6 +423,7 @@ fail:
407static int ath9k_init_btcoex(struct ath_softc *sc) 423static int ath9k_init_btcoex(struct ath_softc *sc)
408{ 424{
409 struct ath_txq *txq; 425 struct ath_txq *txq;
426 struct ath_hw *ah = sc->sc_ah;
410 int r; 427 int r;
411 428
412 switch (sc->sc_ah->btcoex_hw.scheme) { 429 switch (sc->sc_ah->btcoex_hw.scheme) {
@@ -423,8 +440,37 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
423 txq = sc->tx.txq_map[WME_AC_BE]; 440 txq = sc->tx.txq_map[WME_AC_BE];
424 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); 441 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
425 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 442 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
443 break;
444 case ATH_BTCOEX_CFG_MCI:
445 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
426 sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; 446 sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
427 INIT_LIST_HEAD(&sc->btcoex.mci.info); 447 INIT_LIST_HEAD(&sc->btcoex.mci.info);
448
449 r = ath_mci_setup(sc);
450 if (r)
451 return r;
452
453 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
454 ah->btcoex_hw.mci.ready = false;
455 ah->btcoex_hw.mci.bt_state = 0;
456 ah->btcoex_hw.mci.bt_ver_major = 3;
457 ah->btcoex_hw.mci.bt_ver_minor = 0;
458 ah->btcoex_hw.mci.bt_version_known = false;
459 ah->btcoex_hw.mci.update_2g5g = true;
460 ah->btcoex_hw.mci.is_2g = true;
461 ah->btcoex_hw.mci.wlan_channels_update = false;
462 ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
463 ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
464 ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
465 ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
466 ah->btcoex_hw.mci.query_bt = true;
467 ah->btcoex_hw.mci.unhalt_bt_gpm = true;
468 ah->btcoex_hw.mci.halted_bt_gpm = false;
469 ah->btcoex_hw.mci.need_flush_btinfo = false;
470 ah->btcoex_hw.mci.wlan_cal_seq = 0;
471 ah->btcoex_hw.mci.wlan_cal_done = 0;
472 ah->btcoex_hw.mci.config = 0x2201;
473 }
428 break; 474 break;
429 default: 475 default:
430 WARN_ON(1); 476 WARN_ON(1);
@@ -838,6 +884,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
838 sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 884 sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
839 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer); 885 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
840 886
887 if (sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_MCI)
888 ath_mci_cleanup(sc);
889
841 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 890 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
842 if (ATH_TXQ_SETUP(sc, i)) 891 if (ATH_TXQ_SETUP(sc, i))
843 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 892 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 6a8fdf33a527..0e4fbb3bea33 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -16,6 +16,7 @@
16 16
17#include "hw.h" 17#include "hw.h"
18#include "hw-ops.h" 18#include "hw-ops.h"
19#include <linux/export.h>
19 20
20static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, 21static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
21 struct ath9k_tx_queue_info *qi) 22 struct ath9k_tx_queue_info *qi)
@@ -759,7 +760,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
759 return true; 760 return true;
760 761
761 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE); 762 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
762 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS)) 763
764 if (((host_isr & AR_INTR_MAC_IRQ) ||
765 (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
766 (host_isr != AR_INTR_SPURIOUS))
763 return true; 767 return true;
764 768
765 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE); 769 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
@@ -797,6 +801,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
797{ 801{
798 struct ath_common *common = ath9k_hw_common(ah); 802 struct ath_common *common = ath9k_hw_common(ah);
799 u32 sync_default = AR_INTR_SYNC_DEFAULT; 803 u32 sync_default = AR_INTR_SYNC_DEFAULT;
804 u32 async_mask;
800 805
801 if (!(ah->imask & ATH9K_INT_GLOBAL)) 806 if (!(ah->imask & ATH9K_INT_GLOBAL))
802 return; 807 return;
@@ -811,13 +816,16 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
811 if (AR_SREV_9340(ah)) 816 if (AR_SREV_9340(ah))
812 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL; 817 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
813 818
819 async_mask = AR_INTR_MAC_IRQ;
820
821 if (ah->imask & ATH9K_INT_MCI)
822 async_mask |= AR_INTR_ASYNC_MASK_MCI;
823
814 ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n"); 824 ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
815 REG_WRITE(ah, AR_IER, AR_IER_ENABLE); 825 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
816 if (!AR_SREV_9100(ah)) { 826 if (!AR_SREV_9100(ah)) {
817 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 827 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
818 AR_INTR_MAC_IRQ); 828 REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
819 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
820
821 829
822 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default); 830 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
823 REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default); 831 REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e43c41cff25b..7fbc4bdd4efe 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -118,7 +118,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
118 if (--sc->ps_usecount != 0) 118 if (--sc->ps_usecount != 0)
119 goto unlock; 119 goto unlock;
120 120
121 if (sc->ps_idle) 121 if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
122 mode = ATH9K_PM_FULL_SLEEP; 122 mode = ATH9K_PM_FULL_SLEEP;
123 else if (sc->ps_enabled && 123 else if (sc->ps_enabled &&
124 !(sc->ps_flags & (PS_WAIT_FOR_BEACON | 124 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
286 ath_start_ani(common); 286 ath_start_ani(common);
287 } 287 }
288 288
289 if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) { 289 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
290 struct ath_hw_antcomb_conf div_ant_conf; 290 struct ath_hw_antcomb_conf div_ant_conf;
291 u8 lna_conf; 291 u8 lna_conf;
292 292
@@ -332,7 +332,8 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
332 hchan = ah->curchan; 332 hchan = ah->curchan;
333 } 333 }
334 334
335 if (fastcc && !ath9k_hw_check_alive(ah)) 335 if (fastcc && (ah->chip_fullsleep ||
336 !ath9k_hw_check_alive(ah)))
336 fastcc = false; 337 fastcc = false;
337 338
338 if (!ath_prepare_reset(sc, retry_tx, flush)) 339 if (!ath_prepare_reset(sc, retry_tx, flush))
@@ -561,7 +562,6 @@ void ath_ani_calibrate(unsigned long data)
561 /* Long calibration runs independently of short calibration. */ 562 /* Long calibration runs independently of short calibration. */
562 if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) { 563 if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
563 longcal = true; 564 longcal = true;
564 ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
565 common->ani.longcal_timer = timestamp; 565 common->ani.longcal_timer = timestamp;
566 } 566 }
567 567
@@ -569,8 +569,6 @@ void ath_ani_calibrate(unsigned long data)
569 if (!common->ani.caldone) { 569 if (!common->ani.caldone) {
570 if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) { 570 if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
571 shortcal = true; 571 shortcal = true;
572 ath_dbg(common, ATH_DBG_ANI,
573 "shortcal @%lu\n", jiffies);
574 common->ani.shortcal_timer = timestamp; 572 common->ani.shortcal_timer = timestamp;
575 common->ani.resetcal_timer = timestamp; 573 common->ani.resetcal_timer = timestamp;
576 } 574 }
@@ -584,8 +582,9 @@ void ath_ani_calibrate(unsigned long data)
584 } 582 }
585 583
586 /* Verify whether we must check ANI */ 584 /* Verify whether we must check ANI */
587 if ((timestamp - common->ani.checkani_timer) >= 585 if (sc->sc_ah->config.enable_ani
588 ah->config.ani_poll_interval) { 586 && (timestamp - common->ani.checkani_timer) >=
587 ah->config.ani_poll_interval) {
589 aniflag = true; 588 aniflag = true;
590 common->ani.checkani_timer = timestamp; 589 common->ani.checkani_timer = timestamp;
591 } 590 }
@@ -605,6 +604,11 @@ void ath_ani_calibrate(unsigned long data)
605 ah->rxchainmask, longcal); 604 ah->rxchainmask, longcal);
606 } 605 }
607 606
607 ath_dbg(common, ATH_DBG_ANI,
608 "Calibration @%lu finished: %s %s %s, caldone: %s\n", jiffies,
609 longcal ? "long" : "", shortcal ? "short" : "",
610 aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
611
608 ath9k_ps_restore(sc); 612 ath9k_ps_restore(sc);
609 613
610set_timer: 614set_timer:
@@ -640,9 +644,9 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
640 spin_lock(&sc->nodes_lock); 644 spin_lock(&sc->nodes_lock);
641 list_add(&an->list, &sc->nodes); 645 list_add(&an->list, &sc->nodes);
642 spin_unlock(&sc->nodes_lock); 646 spin_unlock(&sc->nodes_lock);
647#endif
643 an->sta = sta; 648 an->sta = sta;
644 an->vif = vif; 649 an->vif = vif;
645#endif
646 if (sc->sc_flags & SC_OP_TXAGGR) { 650 if (sc->sc_flags & SC_OP_TXAGGR) {
647 ath_tx_node_init(sc, an); 651 ath_tx_node_init(sc, an);
648 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 652 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
@@ -742,6 +746,9 @@ void ath9k_tasklet(unsigned long data)
742 if (status & ATH9K_INT_GENTIMER) 746 if (status & ATH9K_INT_GENTIMER)
743 ath_gen_timer_isr(sc->sc_ah); 747 ath_gen_timer_isr(sc->sc_ah);
744 748
749 if (status & ATH9K_INT_MCI)
750 ath_mci_intr(sc);
751
745out: 752out:
746 /* re-enable hardware interrupt */ 753 /* re-enable hardware interrupt */
747 ath9k_hw_enable_interrupts(ah); 754 ath9k_hw_enable_interrupts(ah);
@@ -764,7 +771,8 @@ irqreturn_t ath_isr(int irq, void *dev)
764 ATH9K_INT_BMISS | \ 771 ATH9K_INT_BMISS | \
765 ATH9K_INT_CST | \ 772 ATH9K_INT_CST | \
766 ATH9K_INT_TSFOOR | \ 773 ATH9K_INT_TSFOOR | \
767 ATH9K_INT_GENTIMER) 774 ATH9K_INT_GENTIMER | \
775 ATH9K_INT_MCI)
768 776
769 struct ath_softc *sc = dev; 777 struct ath_softc *sc = dev;
770 struct ath_hw *ah = sc->sc_ah; 778 struct ath_hw *ah = sc->sc_ah;
@@ -882,82 +890,6 @@ chip_reset:
882#undef SCHED_INTR 890#undef SCHED_INTR
883} 891}
884 892
885static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
886{
887 struct ath_hw *ah = sc->sc_ah;
888 struct ath_common *common = ath9k_hw_common(ah);
889 struct ieee80211_channel *channel = hw->conf.channel;
890 int r;
891
892 ath9k_ps_wakeup(sc);
893 spin_lock_bh(&sc->sc_pcu_lock);
894 atomic_set(&ah->intr_ref_cnt, -1);
895
896 ath9k_hw_configpcipowersave(ah, false);
897
898 if (!ah->curchan)
899 ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
900
901 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
902 if (r) {
903 ath_err(common,
904 "Unable to reset channel (%u MHz), reset status %d\n",
905 channel->center_freq, r);
906 }
907
908 ath_complete_reset(sc, true);
909
910 /* Enable LED */
911 ath9k_hw_cfg_output(ah, ah->led_pin,
912 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
913 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
914
915 spin_unlock_bh(&sc->sc_pcu_lock);
916
917 ath9k_ps_restore(sc);
918}
919
920void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
921{
922 struct ath_hw *ah = sc->sc_ah;
923 struct ieee80211_channel *channel = hw->conf.channel;
924 int r;
925
926 ath9k_ps_wakeup(sc);
927
928 ath_cancel_work(sc);
929
930 spin_lock_bh(&sc->sc_pcu_lock);
931
932 /*
933 * Keep the LED on when the radio is disabled
934 * during idle unassociated state.
935 */
936 if (!sc->ps_idle) {
937 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
938 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
939 }
940
941 ath_prepare_reset(sc, false, true);
942
943 if (!ah->curchan)
944 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
945
946 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
947 if (r) {
948 ath_err(ath9k_hw_common(sc->sc_ah),
949 "Unable to reset channel (%u MHz), reset status %d\n",
950 channel->center_freq, r);
951 }
952
953 ath9k_hw_phy_disable(ah);
954
955 ath9k_hw_configpcipowersave(ah, true);
956
957 spin_unlock_bh(&sc->sc_pcu_lock);
958 ath9k_ps_restore(sc);
959}
960
961static int ath_reset(struct ath_softc *sc, bool retry_tx) 893static int ath_reset(struct ath_softc *sc, bool retry_tx)
962{ 894{
963 int r; 895 int r;
@@ -1093,6 +1025,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
1093 * and then setup of the interrupt mask. 1025 * and then setup of the interrupt mask.
1094 */ 1026 */
1095 spin_lock_bh(&sc->sc_pcu_lock); 1027 spin_lock_bh(&sc->sc_pcu_lock);
1028
1029 atomic_set(&ah->intr_ref_cnt, -1);
1030
1096 r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); 1031 r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
1097 if (r) { 1032 if (r) {
1098 ath_err(common, 1033 ath_err(common,
@@ -1119,6 +1054,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
1119 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) 1054 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1120 ah->imask |= ATH9K_INT_CST; 1055 ah->imask |= ATH9K_INT_CST;
1121 1056
1057 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
1058 ah->imask |= ATH9K_INT_MCI;
1059
1122 sc->sc_flags &= ~SC_OP_INVALID; 1060 sc->sc_flags &= ~SC_OP_INVALID;
1123 sc->sc_ah->is_monitoring = false; 1061 sc->sc_ah->is_monitoring = false;
1124 1062
@@ -1131,6 +1069,18 @@ static int ath9k_start(struct ieee80211_hw *hw)
1131 goto mutex_unlock; 1069 goto mutex_unlock;
1132 } 1070 }
1133 1071
1072 if (ah->led_pin >= 0) {
1073 ath9k_hw_cfg_output(ah, ah->led_pin,
1074 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1075 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
1076 }
1077
1078 /*
1079 * Reset key cache to sane defaults (all entries cleared) instead of
1080 * semi-random values after suspend/resume.
1081 */
1082 ath9k_cmn_init_crypto(sc->sc_ah);
1083
1134 spin_unlock_bh(&sc->sc_pcu_lock); 1084 spin_unlock_bh(&sc->sc_pcu_lock);
1135 1085
1136 if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) && 1086 if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
@@ -1176,6 +1126,13 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1176 } 1126 }
1177 } 1127 }
1178 1128
1129 /*
1130 * Cannot tx while the hardware is in full sleep, it first needs a full
1131 * chip reset to recover from that
1132 */
1133 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP))
1134 goto exit;
1135
1179 if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) { 1136 if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
1180 /* 1137 /*
1181 * We are using PS-Poll and mac80211 can request TX while in 1138 * We are using PS-Poll and mac80211 can request TX while in
@@ -1222,6 +1179,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1222 struct ath_softc *sc = hw->priv; 1179 struct ath_softc *sc = hw->priv;
1223 struct ath_hw *ah = sc->sc_ah; 1180 struct ath_hw *ah = sc->sc_ah;
1224 struct ath_common *common = ath9k_hw_common(ah); 1181 struct ath_common *common = ath9k_hw_common(ah);
1182 bool prev_idle;
1225 1183
1226 mutex_lock(&sc->mutex); 1184 mutex_lock(&sc->mutex);
1227 1185
@@ -1252,35 +1210,45 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1252 * before setting the invalid flag. */ 1210 * before setting the invalid flag. */
1253 ath9k_hw_disable_interrupts(ah); 1211 ath9k_hw_disable_interrupts(ah);
1254 1212
1255 if (!(sc->sc_flags & SC_OP_INVALID)) { 1213 spin_unlock_bh(&sc->sc_pcu_lock);
1256 ath_drain_all_txq(sc, false); 1214
1257 ath_stoprecv(sc); 1215 /* we can now sync irq and kill any running tasklets, since we already
1258 ath9k_hw_phy_disable(ah); 1216 * disabled interrupts and not holding a spin lock */
1259 } else 1217 synchronize_irq(sc->irq);
1260 sc->rx.rxlink = NULL; 1218 tasklet_kill(&sc->intr_tq);
1219 tasklet_kill(&sc->bcon_tasklet);
1220
1221 prev_idle = sc->ps_idle;
1222 sc->ps_idle = true;
1223
1224 spin_lock_bh(&sc->sc_pcu_lock);
1225
1226 if (ah->led_pin >= 0) {
1227 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
1228 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
1229 }
1230
1231 ath_prepare_reset(sc, false, true);
1261 1232
1262 if (sc->rx.frag) { 1233 if (sc->rx.frag) {
1263 dev_kfree_skb_any(sc->rx.frag); 1234 dev_kfree_skb_any(sc->rx.frag);
1264 sc->rx.frag = NULL; 1235 sc->rx.frag = NULL;
1265 } 1236 }
1266 1237
1267 /* disable HAL and put h/w to sleep */ 1238 if (!ah->curchan)
1268 ath9k_hw_disable(ah); 1239 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
1240
1241 ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
1242 ath9k_hw_phy_disable(ah);
1269 1243
1270 spin_unlock_bh(&sc->sc_pcu_lock); 1244 ath9k_hw_configpcipowersave(ah, true);
1271 1245
1272 /* we can now sync irq and kill any running tasklets, since we already 1246 spin_unlock_bh(&sc->sc_pcu_lock);
1273 * disabled interrupts and not holding a spin lock */
1274 synchronize_irq(sc->irq);
1275 tasklet_kill(&sc->intr_tq);
1276 tasklet_kill(&sc->bcon_tasklet);
1277 1247
1278 ath9k_ps_restore(sc); 1248 ath9k_ps_restore(sc);
1279 1249
1280 sc->ps_idle = true;
1281 ath_radio_disable(sc, hw);
1282
1283 sc->sc_flags |= SC_OP_INVALID; 1250 sc->sc_flags |= SC_OP_INVALID;
1251 sc->ps_idle = prev_idle;
1284 1252
1285 mutex_unlock(&sc->mutex); 1253 mutex_unlock(&sc->mutex);
1286 1254
@@ -1620,8 +1588,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1620 struct ath_hw *ah = sc->sc_ah; 1588 struct ath_hw *ah = sc->sc_ah;
1621 struct ath_common *common = ath9k_hw_common(ah); 1589 struct ath_common *common = ath9k_hw_common(ah);
1622 struct ieee80211_conf *conf = &hw->conf; 1590 struct ieee80211_conf *conf = &hw->conf;
1623 bool disable_radio = false;
1624 1591
1592 ath9k_ps_wakeup(sc);
1625 mutex_lock(&sc->mutex); 1593 mutex_lock(&sc->mutex);
1626 1594
1627 /* 1595 /*
@@ -1632,13 +1600,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1632 */ 1600 */
1633 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1601 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1634 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1602 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1635 if (!sc->ps_idle) { 1603 if (sc->ps_idle)
1636 ath_radio_enable(sc, hw); 1604 ath_cancel_work(sc);
1637 ath_dbg(common, ATH_DBG_CONFIG,
1638 "not-idle: enabling radio\n");
1639 } else {
1640 disable_radio = true;
1641 }
1642 } 1605 }
1643 1606
1644 /* 1607 /*
@@ -1745,18 +1708,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1745 ath_dbg(common, ATH_DBG_CONFIG, 1708 ath_dbg(common, ATH_DBG_CONFIG,
1746 "Set power: %d\n", conf->power_level); 1709 "Set power: %d\n", conf->power_level);
1747 sc->config.txpowlimit = 2 * conf->power_level; 1710 sc->config.txpowlimit = 2 * conf->power_level;
1748 ath9k_ps_wakeup(sc);
1749 ath9k_cmn_update_txpow(ah, sc->curtxpow, 1711 ath9k_cmn_update_txpow(ah, sc->curtxpow,
1750 sc->config.txpowlimit, &sc->curtxpow); 1712 sc->config.txpowlimit, &sc->curtxpow);
1751 ath9k_ps_restore(sc);
1752 }
1753
1754 if (disable_radio) {
1755 ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
1756 ath_radio_disable(sc, hw);
1757 } 1713 }
1758 1714
1759 mutex_unlock(&sc->mutex); 1715 mutex_unlock(&sc->mutex);
1716 ath9k_ps_restore(sc);
1760 1717
1761 return 0; 1718 return 0;
1762} 1719}
@@ -1916,7 +1873,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1916 if (ath9k_modparam_nohwcrypt) 1873 if (ath9k_modparam_nohwcrypt)
1917 return -ENOSPC; 1874 return -ENOSPC;
1918 1875
1919 if (vif->type == NL80211_IFTYPE_ADHOC && 1876 if ((vif->type == NL80211_IFTYPE_ADHOC ||
1877 vif->type == NL80211_IFTYPE_MESH_POINT) &&
1920 (key->cipher == WLAN_CIPHER_SUITE_TKIP || 1878 (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
1921 key->cipher == WLAN_CIPHER_SUITE_CCMP) && 1879 key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
1922 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 1880 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
@@ -2324,9 +2282,6 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2324 return; 2282 return;
2325 } 2283 }
2326 2284
2327 if (drop)
2328 timeout = 1;
2329
2330 for (j = 0; j < timeout; j++) { 2285 for (j = 0; j < timeout; j++) {
2331 bool npend = false; 2286 bool npend = false;
2332 2287
@@ -2344,21 +2299,22 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2344 } 2299 }
2345 2300
2346 if (!npend) 2301 if (!npend)
2347 goto out; 2302 break;
2348 } 2303 }
2349 2304
2350 ath9k_ps_wakeup(sc); 2305 if (drop) {
2351 spin_lock_bh(&sc->sc_pcu_lock); 2306 ath9k_ps_wakeup(sc);
2352 drain_txq = ath_drain_all_txq(sc, false); 2307 spin_lock_bh(&sc->sc_pcu_lock);
2353 spin_unlock_bh(&sc->sc_pcu_lock); 2308 drain_txq = ath_drain_all_txq(sc, false);
2309 spin_unlock_bh(&sc->sc_pcu_lock);
2354 2310
2355 if (!drain_txq) 2311 if (!drain_txq)
2356 ath_reset(sc, false); 2312 ath_reset(sc, false);
2357 2313
2358 ath9k_ps_restore(sc); 2314 ath9k_ps_restore(sc);
2359 ieee80211_wake_queues(hw); 2315 ieee80211_wake_queues(hw);
2316 }
2360 2317
2361out:
2362 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); 2318 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
2363 mutex_unlock(&sc->mutex); 2319 mutex_unlock(&sc->mutex);
2364} 2320}
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 0fbb141bc302..691bf47906e2 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -14,6 +14,9 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/dma-mapping.h>
18#include <linux/slab.h>
19
17#include "ath9k.h" 20#include "ath9k.h"
18#include "mci.h" 21#include "mci.h"
19 22
@@ -181,8 +184,58 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
181 ath9k_btcoex_timer_resume(sc); 184 ath9k_btcoex_timer_resume(sc);
182} 185}
183 186
184void ath_mci_process_profile(struct ath_softc *sc, 187
185 struct ath_mci_profile_info *info) 188static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
189{
190 struct ath_hw *ah = sc->sc_ah;
191 struct ath_common *common = ath9k_hw_common(ah);
192 u32 payload[4] = {0, 0, 0, 0};
193
194 switch (opcode) {
195 case MCI_GPM_BT_CAL_REQ:
196
197 ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_REQ\n");
198
199 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
200 ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
201 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
202 } else
203 ath_dbg(common, ATH_DBG_MCI,
204 "MCI State mismatches: %d\n",
205 ar9003_mci_state(ah, MCI_STATE_BT, NULL));
206
207 break;
208
209 case MCI_GPM_BT_CAL_DONE:
210
211 ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_DONE\n");
212
213 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL)
214 ath_dbg(common, ATH_DBG_MCI, "MCI error illegal!\n");
215 else
216 ath_dbg(common, ATH_DBG_MCI, "MCI BT not in CAL state\n");
217
218 break;
219
220 case MCI_GPM_BT_CAL_GRANT:
221
222 ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_GRANT\n");
223
224 /* Send WLAN_CAL_DONE for now */
225 ath_dbg(common, ATH_DBG_MCI, "MCI send WLAN_CAL_DONE\n");
226 MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
227 ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
228 16, false, true);
229 break;
230
231 default:
232 ath_dbg(common, ATH_DBG_MCI, "MCI Unknown GPM CAL message\n");
233 break;
234 }
235}
236
237static void ath_mci_process_profile(struct ath_softc *sc,
238 struct ath_mci_profile_info *info)
186{ 239{
187 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 240 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
188 struct ath_btcoex *btcoex = &sc->btcoex; 241 struct ath_btcoex *btcoex = &sc->btcoex;
@@ -208,8 +261,8 @@ void ath_mci_process_profile(struct ath_softc *sc,
208 ath_mci_update_scheme(sc); 261 ath_mci_update_scheme(sc);
209} 262}
210 263
211void ath_mci_process_status(struct ath_softc *sc, 264static void ath_mci_process_status(struct ath_softc *sc,
212 struct ath_mci_profile_status *status) 265 struct ath_mci_profile_status *status)
213{ 266{
214 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 267 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
215 struct ath_btcoex *btcoex = &sc->btcoex; 268 struct ath_btcoex *btcoex = &sc->btcoex;
@@ -252,3 +305,369 @@ void ath_mci_process_status(struct ath_softc *sc,
252 if (old_num_mgmt != mci->num_mgmt) 305 if (old_num_mgmt != mci->num_mgmt)
253 ath_mci_update_scheme(sc); 306 ath_mci_update_scheme(sc);
254} 307}
308
309static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
310{
311 struct ath_hw *ah = sc->sc_ah;
312 struct ath_mci_profile_info profile_info;
313 struct ath_mci_profile_status profile_status;
314 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
315 u32 version;
316 u8 major;
317 u8 minor;
318 u32 seq_num;
319
320 switch (opcode) {
321
322 case MCI_GPM_COEX_VERSION_QUERY:
323 ath_dbg(common, ATH_DBG_MCI,
324 "MCI Recv GPM COEX Version Query.\n");
325 version = ar9003_mci_state(ah,
326 MCI_STATE_SEND_WLAN_COEX_VERSION, NULL);
327 break;
328
329 case MCI_GPM_COEX_VERSION_RESPONSE:
330 ath_dbg(common, ATH_DBG_MCI,
331 "MCI Recv GPM COEX Version Response.\n");
332 major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
333 minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
334 ath_dbg(common, ATH_DBG_MCI,
335 "MCI BT Coex version: %d.%d\n", major, minor);
336 version = (major << 8) + minor;
337 version = ar9003_mci_state(ah,
338 MCI_STATE_SET_BT_COEX_VERSION, &version);
339 break;
340
341 case MCI_GPM_COEX_STATUS_QUERY:
342 ath_dbg(common, ATH_DBG_MCI,
343 "MCI Recv GPM COEX Status Query = 0x%02x.\n",
344 *(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP));
345 ar9003_mci_state(ah,
346 MCI_STATE_SEND_WLAN_CHANNELS, NULL);
347 break;
348
349 case MCI_GPM_COEX_BT_PROFILE_INFO:
350 ath_dbg(common, ATH_DBG_MCI,
351 "MCI Recv GPM Coex BT profile info\n");
352 memcpy(&profile_info,
353 (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
354
355 if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN)
356 || (profile_info.type >=
357 MCI_GPM_COEX_PROFILE_MAX)) {
358
359 ath_dbg(common, ATH_DBG_MCI,
360 "illegal profile type = %d,"
361 "state = %d\n", profile_info.type,
362 profile_info.start);
363 break;
364 }
365
366 ath_mci_process_profile(sc, &profile_info);
367 break;
368
369 case MCI_GPM_COEX_BT_STATUS_UPDATE:
370 profile_status.is_link = *(rx_payload +
371 MCI_GPM_COEX_B_STATUS_TYPE);
372 profile_status.conn_handle = *(rx_payload +
373 MCI_GPM_COEX_B_STATUS_LINKID);
374 profile_status.is_critical = *(rx_payload +
375 MCI_GPM_COEX_B_STATUS_STATE);
376
377 seq_num = *((u32 *)(rx_payload + 12));
378 ath_dbg(common, ATH_DBG_MCI,
379 "MCI Recv GPM COEX BT_Status_Update: "
380 "is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
381 profile_status.is_link, profile_status.conn_handle,
382 profile_status.is_critical, seq_num);
383
384 ath_mci_process_status(sc, &profile_status);
385 break;
386
387 default:
388 ath_dbg(common, ATH_DBG_MCI,
389 "MCI Unknown GPM COEX message = 0x%02x\n", opcode);
390 break;
391 }
392}
393
394static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf)
395{
396 int error = 0;
397
398 buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len,
399 &buf->bf_paddr, GFP_KERNEL);
400
401 if (buf->bf_addr == NULL) {
402 error = -ENOMEM;
403 goto fail;
404 }
405
406 return 0;
407
408fail:
409 memset(buf, 0, sizeof(*buf));
410 return error;
411}
412
413static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf)
414{
415 if (buf->bf_addr) {
416 dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr,
417 buf->bf_paddr);
418 memset(buf, 0, sizeof(*buf));
419 }
420}
421
422int ath_mci_setup(struct ath_softc *sc)
423{
424 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
425 struct ath_mci_coex *mci = &sc->mci_coex;
426 int error = 0;
427
428 mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
429
430 if (ath_mci_buf_alloc(sc, &mci->sched_buf)) {
431 ath_dbg(common, ATH_DBG_FATAL, "MCI buffer alloc failed\n");
432 error = -ENOMEM;
433 goto fail;
434 }
435
436 mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
437
438 memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN,
439 mci->sched_buf.bf_len);
440
441 mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
442 mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
443 mci->sched_buf.bf_len;
444 mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
445
446 /* initialize the buffer */
447 memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len);
448
449 ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
450 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
451 mci->sched_buf.bf_paddr);
452fail:
453 return error;
454}
455
456void ath_mci_cleanup(struct ath_softc *sc)
457{
458 struct ath_hw *ah = sc->sc_ah;
459 struct ath_mci_coex *mci = &sc->mci_coex;
460
461 /*
462 * both schedule and gpm buffers will be released
463 */
464 ath_mci_buf_free(sc, &mci->sched_buf);
465 ar9003_mci_cleanup(ah);
466}
467
468void ath_mci_intr(struct ath_softc *sc)
469{
470 struct ath_mci_coex *mci = &sc->mci_coex;
471 struct ath_hw *ah = sc->sc_ah;
472 struct ath_common *common = ath9k_hw_common(ah);
473 u32 mci_int, mci_int_rxmsg;
474 u32 offset, subtype, opcode;
475 u32 *pgpm;
476 u32 more_data = MCI_GPM_MORE;
477 bool skip_gpm = false;
478
479 ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
480
481 if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
482
483 ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
484 ath_dbg(common, ATH_DBG_MCI,
485 "MCI interrupt but MCI disabled\n");
486
487 ath_dbg(common, ATH_DBG_MCI,
488 "MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n",
489 mci_int, mci_int_rxmsg);
490 return;
491 }
492
493 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
494 u32 payload[4] = { 0xffffffff, 0xffffffff,
495 0xffffffff, 0xffffff00};
496
497 /*
498 * The following REMOTE_RESET and SYS_WAKING used to sent
499 * only when BT wake up. Now they are always sent, as a
500 * recovery method to reset BT MCI's RX alignment.
501 */
502 ath_dbg(common, ATH_DBG_MCI, "MCI interrupt send REMOTE_RESET\n");
503
504 ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
505 payload, 16, true, false);
506 ath_dbg(common, ATH_DBG_MCI, "MCI interrupt send SYS_WAKING\n");
507 ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
508 NULL, 0, true, false);
509
510 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
511 ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
512
513 /*
514 * always do this for recovery and 2G/5G toggling and LNA_TRANS
515 */
516 ath_dbg(common, ATH_DBG_MCI, "MCI Set BT state to AWAKE.\n");
517 ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
518 }
519
520 /* Processing SYS_WAKING/SYS_SLEEPING */
521 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
522 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
523
524 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
525
526 if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
527 == MCI_BT_SLEEP)
528 ath_dbg(common, ATH_DBG_MCI,
529 "MCI BT stays in sleep mode\n");
530 else {
531 ath_dbg(common, ATH_DBG_MCI,
532 "MCI Set BT state to AWAKE.\n");
533 ar9003_mci_state(ah,
534 MCI_STATE_SET_BT_AWAKE, NULL);
535 }
536 } else
537 ath_dbg(common, ATH_DBG_MCI,
538 "MCI BT stays in AWAKE mode.\n");
539 }
540
541 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
542
543 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
544
545 if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
546
547 if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
548 == MCI_BT_AWAKE)
549 ath_dbg(common, ATH_DBG_MCI,
550 "MCI BT stays in AWAKE mode.\n");
551 else {
552 ath_dbg(common, ATH_DBG_MCI,
553 "MCI SetBT state to SLEEP\n");
554 ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
555 NULL);
556 }
557 } else
558 ath_dbg(common, ATH_DBG_MCI,
559 "MCI BT stays in SLEEP mode\n");
560 }
561
562 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
563 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
564
565 ath_dbg(common, ATH_DBG_MCI, "MCI RX broken, skip GPM msgs\n");
566 ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
567 skip_gpm = true;
568 }
569
570 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
571
572 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
573 offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
574 NULL);
575 }
576
577 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
578
579 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
580
581 while (more_data == MCI_GPM_MORE) {
582
583 pgpm = mci->gpm_buf.bf_addr;
584 offset = ar9003_mci_state(ah,
585 MCI_STATE_NEXT_GPM_OFFSET, &more_data);
586
587 if (offset == MCI_GPM_INVALID)
588 break;
589
590 pgpm += (offset >> 2);
591
592 /*
593 * The first dword is timer.
594 * The real data starts from 2nd dword.
595 */
596
597 subtype = MCI_GPM_TYPE(pgpm);
598 opcode = MCI_GPM_OPCODE(pgpm);
599
600 if (!skip_gpm) {
601
602 if (MCI_GPM_IS_CAL_TYPE(subtype))
603 ath_mci_cal_msg(sc, subtype,
604 (u8 *) pgpm);
605 else {
606 switch (subtype) {
607 case MCI_GPM_COEX_AGENT:
608 ath_mci_msg(sc, opcode,
609 (u8 *) pgpm);
610 break;
611 default:
612 break;
613 }
614 }
615 }
616 MCI_GPM_RECYCLE(pgpm);
617 }
618 }
619
620 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
621
622 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
623 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
624
625 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) {
626 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
627 ath_dbg(common, ATH_DBG_MCI, "MCI LNA_INFO\n");
628 }
629
630 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
631
632 int value_dbm = ar9003_mci_state(ah,
633 MCI_STATE_CONT_RSSI_POWER, NULL);
634
635 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
636
637 if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
638 ath_dbg(common, ATH_DBG_MCI,
639 "MCI CONT_INFO: "
640 "(tx) pri = %d, pwr = %d dBm\n",
641 ar9003_mci_state(ah,
642 MCI_STATE_CONT_PRIORITY, NULL),
643 value_dbm);
644 else
645 ath_dbg(common, ATH_DBG_MCI,
646 "MCI CONT_INFO:"
647 "(rx) pri = %d,pwr = %d dBm\n",
648 ar9003_mci_state(ah,
649 MCI_STATE_CONT_PRIORITY, NULL),
650 value_dbm);
651 }
652
653 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) {
654 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
655 ath_dbg(common, ATH_DBG_MCI, "MCI CONT_NACK\n");
656 }
657
658 if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) {
659 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
660 ath_dbg(common, ATH_DBG_MCI, "MCI CONT_RST\n");
661 }
662 }
663
664 if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
665 (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
666 mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
667 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
668
669 if (mci_int_rxmsg & 0xfffffffe)
670 ath_dbg(common, ATH_DBG_MCI,
671 "MCI not processed mci_int_rxmsg = 0x%x\n",
672 mci_int_rxmsg);
673}
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
index 9590c61822d1..29e3e51d078f 100644
--- a/drivers/net/wireless/ath/ath9k/mci.h
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -17,6 +17,9 @@
17#ifndef MCI_H 17#ifndef MCI_H
18#define MCI_H 18#define MCI_H
19 19
20#define ATH_MCI_SCHED_BUF_SIZE (16 * 16) /* 16 entries, 4 dword each */
21#define ATH_MCI_GPM_MAX_ENTRY 16
22#define ATH_MCI_GPM_BUF_SIZE (ATH_MCI_GPM_MAX_ENTRY * 16)
20#define ATH_MCI_DEF_BT_PERIOD 40 23#define ATH_MCI_DEF_BT_PERIOD 40
21#define ATH_MCI_BDR_DUTY_CYCLE 20 24#define ATH_MCI_BDR_DUTY_CYCLE 20
22#define ATH_MCI_MAX_DUTY_CYCLE 90 25#define ATH_MCI_MAX_DUTY_CYCLE 90
@@ -110,9 +113,22 @@ struct ath_mci_profile {
110 u8 num_bdr; 113 u8 num_bdr;
111}; 114};
112 115
116
117struct ath_mci_buf {
118 void *bf_addr; /* virtual addr of desc */
119 dma_addr_t bf_paddr; /* physical addr of buffer */
120 u32 bf_len; /* len of data */
121};
122
123struct ath_mci_coex {
124 atomic_t mci_cal_flag;
125 struct ath_mci_buf sched_buf;
126 struct ath_mci_buf gpm_buf;
127 u32 bt_cal_start;
128};
129
113void ath_mci_flush_profile(struct ath_mci_profile *mci); 130void ath_mci_flush_profile(struct ath_mci_profile *mci);
114void ath_mci_process_profile(struct ath_softc *sc, 131int ath_mci_setup(struct ath_softc *sc);
115 struct ath_mci_profile_info *info); 132void ath_mci_cleanup(struct ath_softc *sc);
116void ath_mci_process_status(struct ath_softc *sc, 133void ath_mci_intr(struct ath_softc *sc);
117 struct ath_mci_profile_status *status);
118#endif 134#endif
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index edb0b4b3da3a..a439edc5dc06 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,6 +18,7 @@
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/pci-aspm.h> 19#include <linux/pci-aspm.h>
20#include <linux/ath9k_platform.h> 20#include <linux/ath9k_platform.h>
21#include <linux/module.h>
21#include "ath9k.h" 22#include "ath9k.h"
22 23
23static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { 24static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
@@ -306,12 +307,11 @@ static int ath_pci_suspend(struct device *device)
306 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 307 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
307 struct ath_softc *sc = hw->priv; 308 struct ath_softc *sc = hw->priv;
308 309
309 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
310
311 /* The device has to be moved to FULLSLEEP forcibly. 310 /* The device has to be moved to FULLSLEEP forcibly.
312 * Otherwise the chip never moved to full sleep, 311 * Otherwise the chip never moved to full sleep,
313 * when no interface is up. 312 * when no interface is up.
314 */ 313 */
314 ath9k_hw_disable(sc->sc_ah);
315 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 315 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
316 316
317 return 0; 317 return 0;
@@ -320,8 +320,6 @@ static int ath_pci_suspend(struct device *device)
320static int ath_pci_resume(struct device *device) 320static int ath_pci_resume(struct device *device)
321{ 321{
322 struct pci_dev *pdev = to_pci_dev(device); 322 struct pci_dev *pdev = to_pci_dev(device);
323 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
324 struct ath_softc *sc = hw->priv;
325 u32 val; 323 u32 val;
326 324
327 /* 325 /*
@@ -333,22 +331,6 @@ static int ath_pci_resume(struct device *device)
333 if ((val & 0x0000ff00) != 0) 331 if ((val & 0x0000ff00) != 0)
334 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 332 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
335 333
336 ath9k_ps_wakeup(sc);
337 /* Enable LED */
338 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
339 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
340 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
341
342 /*
343 * Reset key cache to sane defaults (all entries cleared) instead of
344 * semi-random values after suspend/resume.
345 */
346 ath9k_cmn_init_crypto(sc->sc_ah);
347 ath9k_ps_restore(sc);
348
349 sc->ps_idle = true;
350 ath_radio_disable(sc, hw);
351
352 return 0; 334 return 0;
353} 335}
354 336
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 8448281dd069..528d5f3e868c 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/export.h>
19 20
20#include "ath9k.h" 21#include "ath9k.h"
21 22
@@ -1270,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
1270 1271
1271 ath_rc_priv->max_valid_rate = k; 1272 ath_rc_priv->max_valid_rate = k;
1272 ath_rc_sort_validrates(rate_table, ath_rc_priv); 1273 ath_rc_sort_validrates(rate_table, ath_rc_priv);
1273 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4]; 1274 ath_rc_priv->rate_max_phy = (k > 4) ?
1275 ath_rc_priv->valid_rate_index[k-4] :
1276 ath_rc_priv->valid_rate_index[k-1];
1274 ath_rc_priv->rate_table = rate_table; 1277 ath_rc_priv->rate_table = rate_table;
1275 1278
1276 ath_dbg(common, ATH_DBG_CONFIG, 1279 ath_dbg(common, ATH_DBG_CONFIG,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 67b862cdae6d..ad5176de07dc 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -475,7 +475,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
475 475
476 return rfilt; 476 return rfilt;
477 477
478#undef RX_FILTER_PRESERVE
479} 478}
480 479
481int ath_startrecv(struct ath_softc *sc) 480int ath_startrecv(struct ath_softc *sc)
@@ -1824,6 +1823,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1824 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); 1823 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
1825 rxs = IEEE80211_SKB_RXCB(hdr_skb); 1824 rxs = IEEE80211_SKB_RXCB(hdr_skb);
1826 if (ieee80211_is_beacon(hdr->frame_control) && 1825 if (ieee80211_is_beacon(hdr->frame_control) &&
1826 !is_zero_ether_addr(common->curbssid) &&
1827 !compare_ether_addr(hdr->addr3, common->curbssid)) 1827 !compare_ether_addr(hdr->addr3, common->curbssid))
1828 rs.is_mybeacon = true; 1828 rs.is_mybeacon = true;
1829 else 1829 else
@@ -1838,11 +1838,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1838 if (sc->sc_flags & SC_OP_RXFLUSH) 1838 if (sc->sc_flags & SC_OP_RXFLUSH)
1839 goto requeue_drop_frag; 1839 goto requeue_drop_frag;
1840 1840
1841 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
1842 rxs, &decrypt_error);
1843 if (retval)
1844 goto requeue_drop_frag;
1845
1846 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1841 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
1847 if (rs.rs_tstamp > tsf_lower && 1842 if (rs.rs_tstamp > tsf_lower &&
1848 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) 1843 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
@@ -1852,6 +1847,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1852 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) 1847 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
1853 rxs->mactime += 0x100000000ULL; 1848 rxs->mactime += 0x100000000ULL;
1854 1849
1850 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
1851 rxs, &decrypt_error);
1852 if (retval)
1853 goto requeue_drop_frag;
1854
1855 /* Ensure we always have an skb to requeue once we are done 1855 /* Ensure we always have an skb to requeue once we are done
1856 * processing the current buffer's skb */ 1856 * processing the current buffer's skb */
1857 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); 1857 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1923,15 +1923,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1923 skb = hdr_skb; 1923 skb = hdr_skb;
1924 } 1924 }
1925 1925
1926 /* 1926
1927 * change the default rx antenna if rx diversity chooses the 1927 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
1928 * other antenna 3 times in a row. 1928
1929 */ 1929 /*
1930 if (sc->rx.defant != rs.rs_antenna) { 1930 * change the default rx antenna if rx diversity
1931 if (++sc->rx.rxotherant >= 3) 1931 * chooses the other antenna 3 times in a row.
1932 ath_setdefantenna(sc, rs.rs_antenna); 1932 */
1933 } else { 1933 if (sc->rx.defant != rs.rs_antenna) {
1934 sc->rx.rxotherant = 0; 1934 if (++sc->rx.rxotherant >= 3)
1935 ath_setdefantenna(sc, rs.rs_antenna);
1936 } else {
1937 sc->rx.rxotherant = 0;
1938 }
1939
1935 } 1940 }
1936 1941
1937 if (rxs->flag & RX_FLAG_MMIC_STRIPPED) 1942 if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 45910975d853..6e2f18861f5d 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1006,6 +1006,8 @@ enum {
1006#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030) 1006#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
1007#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000 1007#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
1008#define AR_INTR_ASYNC_MASK_GPIO_S 18 1008#define AR_INTR_ASYNC_MASK_GPIO_S 18
1009#define AR_INTR_ASYNC_MASK_MCI 0x00000080
1010#define AR_INTR_ASYNC_MASK_MCI_S 7
1009 1011
1010#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034) 1012#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034)
1011#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000 1013#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
@@ -1013,6 +1015,14 @@ enum {
1013 1015
1014#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038) 1016#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
1015#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038) 1017#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
1018#define AR_INTR_ASYNC_CAUSE_MCI 0x00000080
1019#define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \
1020 AR_INTR_ASYNC_CAUSE_MCI)
1021
1022/* Asynchronous Interrupt Enable Register */
1023#define AR_INTR_ASYNC_ENABLE_MCI 0x00000080
1024#define AR_INTR_ASYNC_ENABLE_MCI_S 7
1025
1016 1026
1017#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c) 1027#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
1018#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000 1028#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
@@ -1269,6 +1279,8 @@ enum {
1269#define AR_RTC_INTR_MASK \ 1279#define AR_RTC_INTR_MASK \
1270 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058) 1280 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
1271 1281
1282#define AR_RTC_KEEP_AWAKE 0x7034
1283
1272/* RTC_DERIVED_* - only for AR9100 */ 1284/* RTC_DERIVED_* - only for AR9100 */
1273 1285
1274#define AR_RTC_DERIVED_CLK \ 1286#define AR_RTC_DERIVED_CLK \
@@ -1555,6 +1567,8 @@ enum {
1555#define AR_DIAG_FRAME_NV0 0x00020000 1567#define AR_DIAG_FRAME_NV0 0x00020000
1556#define AR_DIAG_OBS_PT_SEL1 0x000C0000 1568#define AR_DIAG_OBS_PT_SEL1 0x000C0000
1557#define AR_DIAG_OBS_PT_SEL1_S 18 1569#define AR_DIAG_OBS_PT_SEL1_S 18
1570#define AR_DIAG_OBS_PT_SEL2 0x08000000
1571#define AR_DIAG_OBS_PT_SEL2_S 27
1558#define AR_DIAG_FORCE_RX_CLEAR 0x00100000 /* force rx_clear high */ 1572#define AR_DIAG_FORCE_RX_CLEAR 0x00100000 /* force rx_clear high */
1559#define AR_DIAG_IGNORE_VIRT_CS 0x00200000 1573#define AR_DIAG_IGNORE_VIRT_CS 0x00200000
1560#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000 1574#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000
@@ -1929,37 +1943,277 @@ enum {
1929#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6 1943#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
1930 1944
1931/* MCI Registers */ 1945/* MCI Registers */
1932#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c 1946
1933#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001 1947#define AR_MCI_COMMAND0 0x1800
1934#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0 1948#define AR_MCI_COMMAND0_HEADER 0xFF
1935#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002 1949#define AR_MCI_COMMAND0_HEADER_S 0
1936#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1 1950#define AR_MCI_COMMAND0_LEN 0x1f00
1937#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004 1951#define AR_MCI_COMMAND0_LEN_S 8
1938#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2 1952#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000
1939#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008 1953#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13
1940#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3 1954
1941#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010 1955#define AR_MCI_COMMAND1 0x1804
1942#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4 1956
1943#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020 1957#define AR_MCI_COMMAND2 0x1808
1944#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5 1958#define AR_MCI_COMMAND2_RESET_TX 0x01
1945#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040 1959#define AR_MCI_COMMAND2_RESET_TX_S 0
1946#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6 1960#define AR_MCI_COMMAND2_RESET_RX 0x02
1947#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100 1961#define AR_MCI_COMMAND2_RESET_RX_S 1
1948#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8 1962#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC
1949#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200 1963#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2
1950#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9 1964#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400
1951#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400 1965#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10
1952#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10 1966
1953#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800 1967#define AR_MCI_RX_CTRL 0x180c
1954#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11 1968
1955#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000 1969#define AR_MCI_TX_CTRL 0x1810
1956#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12 1970/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
1957#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \ 1971#define AR_MCI_TX_CTRL_CLK_DIV 0x03
1972#define AR_MCI_TX_CTRL_CLK_DIV_S 0
1973#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04
1974#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2
1975#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8
1976#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3
1977#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000
1978#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24
1979
1980#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814
1981#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF
1982#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0
1983#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000
1984#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16
1985
1986#define AR_MCI_SCHD_TABLE_0 0x1818
1987#define AR_MCI_SCHD_TABLE_1 0x181c
1988#define AR_MCI_GPM_0 0x1820
1989#define AR_MCI_GPM_1 0x1824
1990#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000
1991#define AR_MCI_GPM_WRITE_PTR_S 16
1992#define AR_MCI_GPM_BUF_LEN 0x0000FFFF
1993#define AR_MCI_GPM_BUF_LEN_S 0
1994
1995#define AR_MCI_INTERRUPT_RAW 0x1828
1996#define AR_MCI_INTERRUPT_EN 0x182c
1997#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001
1998#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0
1999#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002
2000#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1
2001#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004
2002#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2
2003#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008
2004#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3
2005#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010
2006#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4
2007#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020
2008#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5
2009#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080
2010#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7
2011#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100
2012#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8
2013#define AR_MCI_INTERRUPT_RX_MSG 0x00000200
2014#define AR_MCI_INTERRUPT_RX_MSG_S 9
2015#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400
2016#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10
2017#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800
2018#define AR_MCI_INTERRUPT_BT_PRI_S 11
2019#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000
2020#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27
2021#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000
2022#define AR_MCI_INTERRUPT_BT_FREQ_S 28
2023#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000
2024#define AR_MCI_INTERRUPT_BT_STOMP_S 29
2025#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000
2026#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30
2027#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000
2028#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31
2029
2030#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \
2031 AR_MCI_INTERRUPT_RX_INVALID_HDR | \
2032 AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
2033 AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
2034 AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
2035 AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \
2036 AR_MCI_INTERRUPT_RX_MSG | \
2037 AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
2038 AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
2039
2040#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
2041 AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
2042 AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
2043 AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
2044
2045#define AR_MCI_REMOTE_CPU_INT 0x1830
2046#define AR_MCI_REMOTE_CPU_INT_EN 0x1834
2047#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838
2048#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
2049#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
2050#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
2051#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
2052#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
2053#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
2054#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
2055#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
2056#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
2057#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
2058#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
2059#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
2060#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
2061#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
2062#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
2063#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
2064#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
2065#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
2066#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
2067#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
2068#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
2069#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
2070#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
2071#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
2072#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
2073#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
1958 AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \ 2074 AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
1959 AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \ 2075 AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
1960 AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \ 2076 AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
1961 AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \ 2077 AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
1962 AR_MCI_INTERRUPT_RX_MSG_CONT_RST) 2078 AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
1963 2079
2080#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \
2081 AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
2082 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
2083 AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
2084 AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
2085 AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
2086 AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
2087 AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
2088 AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
2089 AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \
2090 AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
2091
2092#define AR_MCI_CPU_INT 0x1840
2093
2094#define AR_MCI_RX_STATUS 0x1844
2095#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00
2096#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8
2097#define AR_MCI_RX_REMOTE_SLEEP 0x00001000
2098#define AR_MCI_RX_REMOTE_SLEEP_S 12
2099#define AR_MCI_RX_MCI_CLK_REQ 0x00002000
2100#define AR_MCI_RX_MCI_CLK_REQ_S 13
2101
2102#define AR_MCI_CONT_STATUS 0x1848
2103#define AR_MCI_CONT_RSSI_POWER 0x000000FF
2104#define AR_MCI_CONT_RSSI_POWER_S 0
2105#define AR_MCI_CONT_RRIORITY 0x0000FF00
2106#define AR_MCI_CONT_RRIORITY_S 8
2107#define AR_MCI_CONT_TXRX 0x00010000
2108#define AR_MCI_CONT_TXRX_S 16
2109
2110#define AR_MCI_BT_PRI0 0x184c
2111#define AR_MCI_BT_PRI1 0x1850
2112#define AR_MCI_BT_PRI2 0x1854
2113#define AR_MCI_BT_PRI3 0x1858
2114#define AR_MCI_BT_PRI 0x185c
2115#define AR_MCI_WL_FREQ0 0x1860
2116#define AR_MCI_WL_FREQ1 0x1864
2117#define AR_MCI_WL_FREQ2 0x1868
2118#define AR_MCI_GAIN 0x186c
2119#define AR_MCI_WBTIMER1 0x1870
2120#define AR_MCI_WBTIMER2 0x1874
2121#define AR_MCI_WBTIMER3 0x1878
2122#define AR_MCI_WBTIMER4 0x187c
2123#define AR_MCI_MAXGAIN 0x1880
2124#define AR_MCI_HW_SCHD_TBL_CTL 0x1884
2125#define AR_MCI_HW_SCHD_TBL_D0 0x1888
2126#define AR_MCI_HW_SCHD_TBL_D1 0x188c
2127#define AR_MCI_HW_SCHD_TBL_D2 0x1890
2128#define AR_MCI_HW_SCHD_TBL_D3 0x1894
2129#define AR_MCI_TX_PAYLOAD0 0x1898
2130#define AR_MCI_TX_PAYLOAD1 0x189c
2131#define AR_MCI_TX_PAYLOAD2 0x18a0
2132#define AR_MCI_TX_PAYLOAD3 0x18a4
2133#define AR_BTCOEX_WBTIMER 0x18a8
2134
2135#define AR_BTCOEX_CTRL 0x18ac
2136#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001
2137#define AR_BTCOEX_CTRL_AR9462_MODE_S 0
2138#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002
2139#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1
2140#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004
2141#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2
2142#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008
2143#define AR_BTCOEX_CTRL_LNA_SHARED_S 3
2144#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010
2145#define AR_BTCOEX_CTRL_PA_SHARED_S 4
2146#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020
2147#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5
2148#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040
2149#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6
2150#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180
2151#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7
2152#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00
2153#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9
2154#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000
2155#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12
2156#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000
2157#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19
2158#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000
2159#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20
2160#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000
2161#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28
2162#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000
2163#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29
2164#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000
2165#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30
2166#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
2167#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
2168
2169#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
2170#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
2171#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
2172#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
2173#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
2174#define AR_BTCOEX_WL_LNA 0x1940
2175#define AR_BTCOEX_RFGAIN_CTRL 0x1944
2176
2177#define AR_BTCOEX_CTRL2 0x1948
2178#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
2179#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11
2180#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000
2181#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19
2182#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000
2183#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22
2184#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000
2185#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23
2186#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000
2187#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24
2188#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000
2189#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25
2190
2191#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001
2192#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0
2193#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002
2194#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1
2195#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004
2196#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
2197#define AR_GLB_WLAN_UART_INTF_EN 0x00020000
2198#define AR_GLB_WLAN_UART_INTF_EN_S 17
2199#define AR_GLB_DS_JTAG_DISABLE 0x00040000
2200#define AR_GLB_DS_JTAG_DISABLE_S 18
2201
2202#define AR_BTCOEX_RC 0x194c
2203#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2))
2204#define AR_BTCOEX_DBG 0x1a50
2205#define AR_MCI_LAST_HW_MSG_HDR 0x1a54
2206#define AR_MCI_LAST_HW_MSG_BDY 0x1a58
2207
2208#define AR_MCI_SCHD_TABLE_2 0x1a5c
2209#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001
2210#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
2211#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002
2212#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1
2213
2214#define AR_BTCOEX_CTRL3 0x1a60
2215#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
2216#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
2217
1964 2218
1965#endif 2219#endif
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 55d077e7135d..23e80e63bca9 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -53,7 +53,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
53 int tx_flags, struct ath_txq *txq); 53 int tx_flags, struct ath_txq *txq);
54static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 54static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55 struct ath_txq *txq, struct list_head *bf_q, 55 struct ath_txq *txq, struct list_head *bf_q,
56 struct ath_tx_status *ts, int txok, int sendbar); 56 struct ath_tx_status *ts, int txok);
57static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 57static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58 struct list_head *head, bool internal); 58 struct list_head *head, bool internal);
59static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, 59static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
@@ -150,6 +150,12 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
150 return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; 150 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
151} 151}
152 152
153static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
154{
155 ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
156 seqno << IEEE80211_SEQ_SEQ_SHIFT);
157}
158
153static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 159static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
154{ 160{
155 struct ath_txq *txq = tid->ac->txq; 161 struct ath_txq *txq = tid->ac->txq;
@@ -158,28 +164,33 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
158 struct list_head bf_head; 164 struct list_head bf_head;
159 struct ath_tx_status ts; 165 struct ath_tx_status ts;
160 struct ath_frame_info *fi; 166 struct ath_frame_info *fi;
167 bool sendbar = false;
161 168
162 INIT_LIST_HEAD(&bf_head); 169 INIT_LIST_HEAD(&bf_head);
163 170
164 memset(&ts, 0, sizeof(ts)); 171 memset(&ts, 0, sizeof(ts));
165 spin_lock_bh(&txq->axq_lock);
166 172
167 while ((skb = __skb_dequeue(&tid->buf_q))) { 173 while ((skb = __skb_dequeue(&tid->buf_q))) {
168 fi = get_frame_info(skb); 174 fi = get_frame_info(skb);
169 bf = fi->bf; 175 bf = fi->bf;
170 176
171 spin_unlock_bh(&txq->axq_lock);
172 if (bf && fi->retries) { 177 if (bf && fi->retries) {
173 list_add_tail(&bf->list, &bf_head); 178 list_add_tail(&bf->list, &bf_head);
174 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 179 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
175 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1); 180 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
181 sendbar = true;
176 } else { 182 } else {
177 ath_tx_send_normal(sc, txq, NULL, skb); 183 ath_tx_send_normal(sc, txq, NULL, skb);
178 } 184 }
179 spin_lock_bh(&txq->axq_lock);
180 } 185 }
181 186
182 spin_unlock_bh(&txq->axq_lock); 187 if (tid->baw_head == tid->baw_tail) {
188 tid->state &= ~AGGR_ADDBA_COMPLETE;
189 tid->state &= ~AGGR_CLEANUP;
190 }
191
192 if (sendbar)
193 ath_send_bar(tid, tid->seq_start);
183} 194}
184 195
185static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 196static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -195,6 +206,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
195 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) { 206 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
196 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 207 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
197 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 208 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
209 if (tid->bar_index >= 0)
210 tid->bar_index--;
198 } 211 }
199} 212}
200 213
@@ -238,9 +251,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
238 bf = fi->bf; 251 bf = fi->bf;
239 252
240 if (!bf) { 253 if (!bf) {
241 spin_unlock(&txq->axq_lock);
242 ath_tx_complete(sc, skb, ATH_TX_ERROR, txq); 254 ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
243 spin_lock(&txq->axq_lock);
244 continue; 255 continue;
245 } 256 }
246 257
@@ -249,24 +260,26 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
249 if (fi->retries) 260 if (fi->retries)
250 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 261 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
251 262
252 spin_unlock(&txq->axq_lock); 263 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
253 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
254 spin_lock(&txq->axq_lock);
255 } 264 }
256 265
257 tid->seq_next = tid->seq_start; 266 tid->seq_next = tid->seq_start;
258 tid->baw_tail = tid->baw_head; 267 tid->baw_tail = tid->baw_head;
268 tid->bar_index = -1;
259} 269}
260 270
261static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, 271static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
262 struct sk_buff *skb) 272 struct sk_buff *skb, int count)
263{ 273{
264 struct ath_frame_info *fi = get_frame_info(skb); 274 struct ath_frame_info *fi = get_frame_info(skb);
265 struct ath_buf *bf = fi->bf; 275 struct ath_buf *bf = fi->bf;
266 struct ieee80211_hdr *hdr; 276 struct ieee80211_hdr *hdr;
277 int prev = fi->retries;
267 278
268 TX_STAT_INC(txq->axq_qnum, a_retries); 279 TX_STAT_INC(txq->axq_qnum, a_retries);
269 if (fi->retries++ > 0) 280 fi->retries += count;
281
282 if (prev > 0)
270 return; 283 return;
271 284
272 hdr = (struct ieee80211_hdr *)skb->data; 285 hdr = (struct ieee80211_hdr *)skb->data;
@@ -365,7 +378,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
365 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; 378 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
366 struct list_head bf_head; 379 struct list_head bf_head;
367 struct sk_buff_head bf_pending; 380 struct sk_buff_head bf_pending;
368 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0; 381 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
369 u32 ba[WME_BA_BMP_SIZE >> 5]; 382 u32 ba[WME_BA_BMP_SIZE >> 5];
370 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 383 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
371 bool rc_update = true; 384 bool rc_update = true;
@@ -374,6 +387,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
374 int nframes; 387 int nframes;
375 u8 tidno; 388 u8 tidno;
376 bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH); 389 bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
390 int i, retries;
391 int bar_index = -1;
377 392
378 skb = bf->bf_mpdu; 393 skb = bf->bf_mpdu;
379 hdr = (struct ieee80211_hdr *)skb->data; 394 hdr = (struct ieee80211_hdr *)skb->data;
@@ -382,6 +397,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
382 397
383 memcpy(rates, tx_info->control.rates, sizeof(rates)); 398 memcpy(rates, tx_info->control.rates, sizeof(rates));
384 399
400 retries = ts->ts_longretry + 1;
401 for (i = 0; i < ts->ts_rateindex; i++)
402 retries += rates[i].count;
403
385 rcu_read_lock(); 404 rcu_read_lock();
386 405
387 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); 406 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
@@ -395,8 +414,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
395 if (!bf->bf_stale || bf_next != NULL) 414 if (!bf->bf_stale || bf_next != NULL)
396 list_move_tail(&bf->list, &bf_head); 415 list_move_tail(&bf->list, &bf_head);
397 416
398 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 417 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
399 0, 0);
400 418
401 bf = bf_next; 419 bf = bf_next;
402 } 420 }
@@ -406,6 +424,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
406 an = (struct ath_node *)sta->drv_priv; 424 an = (struct ath_node *)sta->drv_priv;
407 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; 425 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
408 tid = ATH_AN_2_TID(an, tidno); 426 tid = ATH_AN_2_TID(an, tidno);
427 seq_first = tid->seq_start;
409 428
410 /* 429 /*
411 * The hardware occasionally sends a tx status for the wrong TID. 430 * The hardware occasionally sends a tx status for the wrong TID.
@@ -455,25 +474,25 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
455 } else if (!isaggr && txok) { 474 } else if (!isaggr && txok) {
456 /* transmit completion */ 475 /* transmit completion */
457 acked_cnt++; 476 acked_cnt++;
477 } else if ((tid->state & AGGR_CLEANUP) || !retry) {
478 /*
479 * cleanup in progress, just fail
480 * the un-acked sub-frames
481 */
482 txfail = 1;
483 } else if (flush) {
484 txpending = 1;
485 } else if (fi->retries < ATH_MAX_SW_RETRIES) {
486 if (txok || !an->sleeping)
487 ath_tx_set_retry(sc, txq, bf->bf_mpdu,
488 retries);
489
490 txpending = 1;
458 } else { 491 } else {
459 if ((tid->state & AGGR_CLEANUP) || !retry) { 492 txfail = 1;
460 /* 493 txfail_cnt++;
461 * cleanup in progress, just fail 494 bar_index = max_t(int, bar_index,
462 * the un-acked sub-frames 495 ATH_BA_INDEX(seq_first, seqno));
463 */
464 txfail = 1;
465 } else if (flush) {
466 txpending = 1;
467 } else if (fi->retries < ATH_MAX_SW_RETRIES) {
468 if (txok || !an->sleeping)
469 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
470
471 txpending = 1;
472 } else {
473 txfail = 1;
474 sendbar = 1;
475 txfail_cnt++;
476 }
477 } 496 }
478 497
479 /* 498 /*
@@ -490,9 +509,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
490 * complete the acked-ones/xretried ones; update 509 * complete the acked-ones/xretried ones; update
491 * block-ack window 510 * block-ack window
492 */ 511 */
493 spin_lock_bh(&txq->axq_lock);
494 ath_tx_update_baw(sc, tid, seqno); 512 ath_tx_update_baw(sc, tid, seqno);
495 spin_unlock_bh(&txq->axq_lock);
496 513
497 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 514 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
498 memcpy(tx_info->control.rates, rates, sizeof(rates)); 515 memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -501,33 +518,30 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
501 } 518 }
502 519
503 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 520 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
504 !txfail, sendbar); 521 !txfail);
505 } else { 522 } else {
506 /* retry the un-acked ones */ 523 /* retry the un-acked ones */
507 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) { 524 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
508 if (bf->bf_next == NULL && bf_last->bf_stale) { 525 bf->bf_next == NULL && bf_last->bf_stale) {
509 struct ath_buf *tbf; 526 struct ath_buf *tbf;
510 527
511 tbf = ath_clone_txbuf(sc, bf_last); 528 tbf = ath_clone_txbuf(sc, bf_last);
512 /* 529 /*
513 * Update tx baw and complete the 530 * Update tx baw and complete the
514 * frame with failed status if we 531 * frame with failed status if we
515 * run out of tx buf. 532 * run out of tx buf.
516 */ 533 */
517 if (!tbf) { 534 if (!tbf) {
518 spin_lock_bh(&txq->axq_lock); 535 ath_tx_update_baw(sc, tid, seqno);
519 ath_tx_update_baw(sc, tid, seqno); 536
520 spin_unlock_bh(&txq->axq_lock); 537 ath_tx_complete_buf(sc, bf, txq,
521 538 &bf_head, ts, 0);
522 ath_tx_complete_buf(sc, bf, txq, 539 bar_index = max_t(int, bar_index,
523 &bf_head, 540 ATH_BA_INDEX(seq_first, seqno));
524 ts, 0, 541 break;
525 !flush);
526 break;
527 }
528
529 fi->bf = tbf;
530 } 542 }
543
544 fi->bf = tbf;
531 } 545 }
532 546
533 /* 547 /*
@@ -540,12 +554,18 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
540 bf = bf_next; 554 bf = bf_next;
541 } 555 }
542 556
557 if (bar_index >= 0) {
558 u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
559 ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
560 if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
561 tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
562 }
563
543 /* prepend un-acked frames to the beginning of the pending frame queue */ 564 /* prepend un-acked frames to the beginning of the pending frame queue */
544 if (!skb_queue_empty(&bf_pending)) { 565 if (!skb_queue_empty(&bf_pending)) {
545 if (an->sleeping) 566 if (an->sleeping)
546 ieee80211_sta_set_buffered(sta, tid->tidno, true); 567 ieee80211_sta_set_buffered(sta, tid->tidno, true);
547 568
548 spin_lock_bh(&txq->axq_lock);
549 skb_queue_splice(&bf_pending, &tid->buf_q); 569 skb_queue_splice(&bf_pending, &tid->buf_q);
550 if (!an->sleeping) { 570 if (!an->sleeping) {
551 ath_tx_queue_tid(txq, tid); 571 ath_tx_queue_tid(txq, tid);
@@ -553,18 +573,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
553 if (ts->ts_status & ATH9K_TXERR_FILT) 573 if (ts->ts_status & ATH9K_TXERR_FILT)
554 tid->ac->clear_ps_filter = true; 574 tid->ac->clear_ps_filter = true;
555 } 575 }
556 spin_unlock_bh(&txq->axq_lock);
557 } 576 }
558 577
559 if (tid->state & AGGR_CLEANUP) { 578 if (tid->state & AGGR_CLEANUP)
560 ath_tx_flush_tid(sc, tid); 579 ath_tx_flush_tid(sc, tid);
561 580
562 if (tid->baw_head == tid->baw_tail) {
563 tid->state &= ~AGGR_ADDBA_COMPLETE;
564 tid->state &= ~AGGR_CLEANUP;
565 }
566 }
567
568 rcu_read_unlock(); 581 rcu_read_unlock();
569 582
570 if (needreset) { 583 if (needreset) {
@@ -618,24 +631,26 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
618 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX; 631 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
619 632
620 for (i = 0; i < 4; i++) { 633 for (i = 0; i < 4; i++) {
621 if (rates[i].count) { 634 int modeidx;
622 int modeidx;
623 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
624 legacy = 1;
625 break;
626 }
627 635
628 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 636 if (!rates[i].count)
629 modeidx = MCS_HT40; 637 continue;
630 else
631 modeidx = MCS_HT20;
632
633 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
634 modeidx++;
635 638
636 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx]; 639 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
637 max_4ms_framelen = min(max_4ms_framelen, frmlen); 640 legacy = 1;
641 break;
638 } 642 }
643
644 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
645 modeidx = MCS_HT40;
646 else
647 modeidx = MCS_HT20;
648
649 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
650 modeidx++;
651
652 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
653 max_4ms_framelen = min(max_4ms_framelen, frmlen);
639 } 654 }
640 655
641 /* 656 /*
@@ -771,8 +786,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
771 786
772 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; 787 bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
773 seqno = bf->bf_state.seqno; 788 seqno = bf->bf_state.seqno;
774 if (!bf_first)
775 bf_first = bf;
776 789
777 /* do not step over block-ack window */ 790 /* do not step over block-ack window */
778 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { 791 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
@@ -780,6 +793,21 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
780 break; 793 break;
781 } 794 }
782 795
796 if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
797 struct ath_tx_status ts = {};
798 struct list_head bf_head;
799
800 INIT_LIST_HEAD(&bf_head);
801 list_add(&bf->list, &bf_head);
802 __skb_unlink(skb, &tid->buf_q);
803 ath_tx_update_baw(sc, tid, seqno);
804 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
805 continue;
806 }
807
808 if (!bf_first)
809 bf_first = bf;
810
783 if (!rl) { 811 if (!rl) {
784 aggr_limit = ath_lookup_rate(sc, bf, tid); 812 aggr_limit = ath_lookup_rate(sc, bf, tid);
785 rl = 1; 813 rl = 1;
@@ -1122,6 +1150,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1122 txtid->state |= AGGR_ADDBA_PROGRESS; 1150 txtid->state |= AGGR_ADDBA_PROGRESS;
1123 txtid->paused = true; 1151 txtid->paused = true;
1124 *ssn = txtid->seq_start = txtid->seq_next; 1152 *ssn = txtid->seq_start = txtid->seq_next;
1153 txtid->bar_index = -1;
1125 1154
1126 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); 1155 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
1127 txtid->baw_head = txtid->baw_tail = 0; 1156 txtid->baw_head = txtid->baw_tail = 0;
@@ -1156,9 +1185,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1156 txtid->state |= AGGR_CLEANUP; 1185 txtid->state |= AGGR_CLEANUP;
1157 else 1186 else
1158 txtid->state &= ~AGGR_ADDBA_COMPLETE; 1187 txtid->state &= ~AGGR_ADDBA_COMPLETE;
1159 spin_unlock_bh(&txq->axq_lock);
1160 1188
1161 ath_tx_flush_tid(sc, txtid); 1189 ath_tx_flush_tid(sc, txtid);
1190 spin_unlock_bh(&txq->axq_lock);
1162} 1191}
1163 1192
1164void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, 1193void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -1400,8 +1429,6 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1400 1429
1401static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, 1430static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1402 struct list_head *list, bool retry_tx) 1431 struct list_head *list, bool retry_tx)
1403 __releases(txq->axq_lock)
1404 __acquires(txq->axq_lock)
1405{ 1432{
1406 struct ath_buf *bf, *lastbf; 1433 struct ath_buf *bf, *lastbf;
1407 struct list_head bf_head; 1434 struct list_head bf_head;
@@ -1428,13 +1455,11 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
1428 if (bf_is_ampdu_not_probing(bf)) 1455 if (bf_is_ampdu_not_probing(bf))
1429 txq->axq_ampdu_depth--; 1456 txq->axq_ampdu_depth--;
1430 1457
1431 spin_unlock_bh(&txq->axq_lock);
1432 if (bf_isampdu(bf)) 1458 if (bf_isampdu(bf))
1433 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0, 1459 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1434 retry_tx); 1460 retry_tx);
1435 else 1461 else
1436 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 1462 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
1437 spin_lock_bh(&txq->axq_lock);
1438 } 1463 }
1439} 1464}
1440 1465
@@ -1561,11 +1586,9 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1561 break; 1586 break;
1562 } 1587 }
1563 1588
1564 if (!list_empty(&ac->tid_q)) { 1589 if (!list_empty(&ac->tid_q) && !ac->sched) {
1565 if (!ac->sched) { 1590 ac->sched = true;
1566 ac->sched = true; 1591 list_add_tail(&ac->list, &txq->axq_acq);
1567 list_add_tail(&ac->list, &txq->axq_acq);
1568 }
1569 } 1592 }
1570 1593
1571 if (ac == last_ac || 1594 if (ac == last_ac ||
@@ -1708,10 +1731,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1708 list_add_tail(&bf->list, &bf_head); 1731 list_add_tail(&bf->list, &bf_head);
1709 bf->bf_state.bf_type = 0; 1732 bf->bf_state.bf_type = 0;
1710 1733
1711 /* update starting sequence number for subsequent ADDBA request */
1712 if (tid)
1713 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1714
1715 bf->bf_lastbf = bf; 1734 bf->bf_lastbf = bf;
1716 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 1735 ath_tx_fill_desc(sc, bf, txq, fi->framelen);
1717 ath_tx_txqaddbuf(sc, txq, &bf_head, false); 1736 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
@@ -1819,7 +1838,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
1819 struct ath_buf *bf; 1838 struct ath_buf *bf;
1820 u8 tidno; 1839 u8 tidno;
1821 1840
1822 spin_lock_bh(&txctl->txq->axq_lock);
1823 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && 1841 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1824 ieee80211_is_data_qos(hdr->frame_control)) { 1842 ieee80211_is_data_qos(hdr->frame_control)) {
1825 tidno = ieee80211_get_qos_ctl(hdr)[0] & 1843 tidno = ieee80211_get_qos_ctl(hdr)[0] &
@@ -1838,7 +1856,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
1838 } else { 1856 } else {
1839 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1857 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1840 if (!bf) 1858 if (!bf)
1841 goto out; 1859 return;
1842 1860
1843 bf->bf_state.bfs_paprd = txctl->paprd; 1861 bf->bf_state.bfs_paprd = txctl->paprd;
1844 1862
@@ -1847,9 +1865,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
1847 1865
1848 ath_tx_send_normal(sc, txctl->txq, tid, skb); 1866 ath_tx_send_normal(sc, txctl->txq, tid, skb);
1849 } 1867 }
1850
1851out:
1852 spin_unlock_bh(&txctl->txq->axq_lock);
1853} 1868}
1854 1869
1855/* Upon failure caller should free skb */ 1870/* Upon failure caller should free skb */
@@ -1916,9 +1931,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1916 ieee80211_stop_queue(sc->hw, q); 1931 ieee80211_stop_queue(sc->hw, q);
1917 txq->stopped = 1; 1932 txq->stopped = 1;
1918 } 1933 }
1919 spin_unlock_bh(&txq->axq_lock);
1920 1934
1921 ath_tx_start_dma(sc, skb, txctl); 1935 ath_tx_start_dma(sc, skb, txctl);
1936
1937 spin_unlock_bh(&txq->axq_lock);
1938
1922 return 0; 1939 return 0;
1923} 1940}
1924 1941
@@ -1937,9 +1954,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1937 1954
1938 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1955 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1939 1956
1940 if (tx_flags & ATH_TX_BAR)
1941 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1942
1943 if (!(tx_flags & ATH_TX_ERROR)) 1957 if (!(tx_flags & ATH_TX_ERROR))
1944 /* Frame was ACKed */ 1958 /* Frame was ACKed */
1945 tx_info->flags |= IEEE80211_TX_STAT_ACK; 1959 tx_info->flags |= IEEE80211_TX_STAT_ACK;
@@ -1955,7 +1969,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1955 skb_pull(skb, padsize); 1969 skb_pull(skb, padsize);
1956 } 1970 }
1957 1971
1958 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) { 1972 if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
1959 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; 1973 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
1960 ath_dbg(common, ATH_DBG_PS, 1974 ath_dbg(common, ATH_DBG_PS,
1961 "Going back to sleep after having received TX status (0x%lx)\n", 1975 "Going back to sleep after having received TX status (0x%lx)\n",
@@ -1967,7 +1981,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1967 1981
1968 q = skb_get_queue_mapping(skb); 1982 q = skb_get_queue_mapping(skb);
1969 if (txq == sc->tx.txq_map[q]) { 1983 if (txq == sc->tx.txq_map[q]) {
1970 spin_lock_bh(&txq->axq_lock);
1971 if (WARN_ON(--txq->pending_frames < 0)) 1984 if (WARN_ON(--txq->pending_frames < 0))
1972 txq->pending_frames = 0; 1985 txq->pending_frames = 0;
1973 1986
@@ -1975,7 +1988,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1975 ieee80211_wake_queue(sc->hw, q); 1988 ieee80211_wake_queue(sc->hw, q);
1976 txq->stopped = 0; 1989 txq->stopped = 0;
1977 } 1990 }
1978 spin_unlock_bh(&txq->axq_lock);
1979 } 1991 }
1980 1992
1981 ieee80211_tx_status(hw, skb); 1993 ieee80211_tx_status(hw, skb);
@@ -1983,16 +1995,13 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1983 1995
1984static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1996static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1985 struct ath_txq *txq, struct list_head *bf_q, 1997 struct ath_txq *txq, struct list_head *bf_q,
1986 struct ath_tx_status *ts, int txok, int sendbar) 1998 struct ath_tx_status *ts, int txok)
1987{ 1999{
1988 struct sk_buff *skb = bf->bf_mpdu; 2000 struct sk_buff *skb = bf->bf_mpdu;
1989 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2001 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1990 unsigned long flags; 2002 unsigned long flags;
1991 int tx_flags = 0; 2003 int tx_flags = 0;
1992 2004
1993 if (sendbar)
1994 tx_flags = ATH_TX_BAR;
1995
1996 if (!txok) 2005 if (!txok)
1997 tx_flags |= ATH_TX_ERROR; 2006 tx_flags |= ATH_TX_ERROR;
1998 2007
@@ -2084,8 +2093,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
2084static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, 2093static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2085 struct ath_tx_status *ts, struct ath_buf *bf, 2094 struct ath_tx_status *ts, struct ath_buf *bf,
2086 struct list_head *bf_head) 2095 struct list_head *bf_head)
2087 __releases(txq->axq_lock)
2088 __acquires(txq->axq_lock)
2089{ 2096{
2090 int txok; 2097 int txok;
2091 2098
@@ -2095,16 +2102,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
2095 if (bf_is_ampdu_not_probing(bf)) 2102 if (bf_is_ampdu_not_probing(bf))
2096 txq->axq_ampdu_depth--; 2103 txq->axq_ampdu_depth--;
2097 2104
2098 spin_unlock_bh(&txq->axq_lock);
2099
2100 if (!bf_isampdu(bf)) { 2105 if (!bf_isampdu(bf)) {
2101 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); 2106 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
2102 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0); 2107 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
2103 } else 2108 } else
2104 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true); 2109 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
2105 2110
2106 spin_lock_bh(&txq->axq_lock);
2107
2108 if (sc->sc_flags & SC_OP_TXAGGR) 2111 if (sc->sc_flags & SC_OP_TXAGGR)
2109 ath_txq_schedule(sc, txq); 2112 ath_txq_schedule(sc, txq);
2110} 2113}
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index f4cae1cccbff..cba9d0435dc4 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -23,6 +23,7 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <linux/crc32.h> 25#include <linux/crc32.h>
26#include <linux/module.h>
26#include "carl9170.h" 27#include "carl9170.h"
27#include "fwcmd.h" 28#include "fwcmd.h"
28#include "version.h" 29#include "version.h"
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 59472e1605cd..d19a9ee9d057 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -314,7 +314,7 @@ static void carl9170_tx_release(struct kref *ref)
314 * feedback either [CTL_REQ_TX_STATUS not set] 314 * feedback either [CTL_REQ_TX_STATUS not set]
315 */ 315 */
316 316
317 dev_kfree_skb_any(skb); 317 ieee80211_free_txskb(ar->hw, skb);
318 return; 318 return;
319 } else { 319 } else {
320 /* 320 /*
@@ -1432,7 +1432,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1432 1432
1433err_free: 1433err_free:
1434 ar->tx_dropped++; 1434 ar->tx_dropped++;
1435 dev_kfree_skb_any(skb); 1435 ieee80211_free_txskb(ar->hw, skb);
1436} 1436}
1437 1437
1438void carl9170_tx_scheduler(struct ar9170 *ar) 1438void carl9170_tx_scheduler(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
index 5367b1086e09..508eccf5d982 100644
--- a/drivers/net/wireless/ath/debug.c
+++ b/drivers/net/wireless/ath/debug.c
@@ -14,6 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/export.h>
17#include "ath.h" 18#include "ath.h"
18 19
19const char *ath_opmode_to_string(enum nl80211_iftype opmode) 20const char *ath_opmode_to_string(enum nl80211_iftype opmode)
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index 3f508e59f146..19befb331073 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -14,6 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/export.h>
17#include <asm/unaligned.h> 18#include <asm/unaligned.h>
18 19
19#include "ath.h" 20#include "ath.h"
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 17b0efd86f9a..4cf7c5eb4813 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -15,6 +15,7 @@
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */ 16 */
17 17
18#include <linux/export.h>
18#include <asm/unaligned.h> 19#include <asm/unaligned.h>
19#include <net/mac80211.h> 20#include <net/mac80211.h>
20 21
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index f1be57f0f5bb..10dea37431b3 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -15,11 +15,14 @@
15 */ 15 */
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/export.h>
18#include <net/cfg80211.h> 19#include <net/cfg80211.h>
19#include <net/mac80211.h> 20#include <net/mac80211.h>
20#include "regd.h" 21#include "regd.h"
21#include "regd_common.h" 22#include "regd_common.h"
22 23
24static int __ath_regd_init(struct ath_regulatory *reg);
25
23/* 26/*
24 * This is a set of common rules used by our world regulatory domains. 27 * This is a set of common rules used by our world regulatory domains.
25 * We have 12 world regulatory domains. To save space we consolidate 28 * We have 12 world regulatory domains. To save space we consolidate
@@ -346,10 +349,26 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
346 } 349 }
347} 350}
348 351
352static u16 ath_regd_find_country_by_name(char *alpha2)
353{
354 unsigned int i;
355
356 for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
357 if (!memcmp(allCountries[i].isoName, alpha2, 2))
358 return allCountries[i].countryCode;
359 }
360
361 return -1;
362}
363
349int ath_reg_notifier_apply(struct wiphy *wiphy, 364int ath_reg_notifier_apply(struct wiphy *wiphy,
350 struct regulatory_request *request, 365 struct regulatory_request *request,
351 struct ath_regulatory *reg) 366 struct ath_regulatory *reg)
352{ 367{
368 struct ath_common *common = container_of(reg, struct ath_common,
369 regulatory);
370 u16 country_code;
371
353 /* We always apply this */ 372 /* We always apply this */
354 ath_reg_apply_radar_flags(wiphy); 373 ath_reg_apply_radar_flags(wiphy);
355 374
@@ -362,14 +381,37 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
362 return 0; 381 return 0;
363 382
364 switch (request->initiator) { 383 switch (request->initiator) {
365 case NL80211_REGDOM_SET_BY_DRIVER:
366 case NL80211_REGDOM_SET_BY_CORE: 384 case NL80211_REGDOM_SET_BY_CORE:
385 /*
386 * If common->reg_world_copy is world roaming it means we *were*
387 * world roaming... so we now have to restore that data.
388 */
389 if (!ath_is_world_regd(&common->reg_world_copy))
390 break;
391
392 memcpy(reg, &common->reg_world_copy,
393 sizeof(struct ath_regulatory));
394 break;
395 case NL80211_REGDOM_SET_BY_DRIVER:
367 case NL80211_REGDOM_SET_BY_USER: 396 case NL80211_REGDOM_SET_BY_USER:
368 break; 397 break;
369 case NL80211_REGDOM_SET_BY_COUNTRY_IE: 398 case NL80211_REGDOM_SET_BY_COUNTRY_IE:
370 if (ath_is_world_regd(reg)) 399 if (!ath_is_world_regd(reg))
371 ath_reg_apply_world_flags(wiphy, request->initiator, 400 break;
372 reg); 401
402 country_code = ath_regd_find_country_by_name(request->alpha2);
403 if (country_code == (u16) -1)
404 break;
405
406 reg->current_rd = COUNTRY_ERD_FLAG;
407 reg->current_rd |= country_code;
408
409 printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
410 reg->current_rd);
411 __ath_regd_init(reg);
412
413 ath_reg_apply_world_flags(wiphy, request->initiator, reg);
414
373 break; 415 break;
374 } 416 }
375 417
@@ -507,11 +549,7 @@ static void ath_regd_sanitize(struct ath_regulatory *reg)
507 reg->current_rd = 0x64; 549 reg->current_rd = 0x64;
508} 550}
509 551
510int 552static int __ath_regd_init(struct ath_regulatory *reg)
511ath_regd_init(struct ath_regulatory *reg,
512 struct wiphy *wiphy,
513 int (*reg_notifier)(struct wiphy *wiphy,
514 struct regulatory_request *request))
515{ 553{
516 struct country_code_to_enum_rd *country = NULL; 554 struct country_code_to_enum_rd *country = NULL;
517 u16 regdmn; 555 u16 regdmn;
@@ -582,7 +620,29 @@ ath_regd_init(struct ath_regulatory *reg,
582 printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n", 620 printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
583 reg->regpair->regDmnEnum); 621 reg->regpair->regDmnEnum);
584 622
623 return 0;
624}
625
626int
627ath_regd_init(struct ath_regulatory *reg,
628 struct wiphy *wiphy,
629 int (*reg_notifier)(struct wiphy *wiphy,
630 struct regulatory_request *request))
631{
632 struct ath_common *common = container_of(reg, struct ath_common,
633 regulatory);
634 int r;
635
636 r = __ath_regd_init(reg);
637 if (r)
638 return r;
639
640 if (ath_is_world_regd(reg))
641 memcpy(&common->reg_world_copy, reg,
642 sizeof(struct ath_regulatory));
643
585 ath_regd_init_wiphy(reg, wiphy, reg_notifier); 644 ath_regd_init_wiphy(reg, wiphy, reg_notifier);
645
586 return 0; 646 return 0;
587} 647}
588EXPORT_SYMBOL(ath_regd_init); 648EXPORT_SYMBOL(ath_regd_init);
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 447a2307c9d9..37110dfd2c96 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -1011,14 +1011,10 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
1011} 1011}
1012 1012
1013/* Message printing */ 1013/* Message printing */
1014void b43info(struct b43_wl *wl, const char *fmt, ...) 1014__printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...);
1015 __attribute__ ((format(printf, 2, 3))); 1015__printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...);
1016void b43err(struct b43_wl *wl, const char *fmt, ...) 1016__printf(2, 3) void b43warn(struct b43_wl *wl, const char *fmt, ...);
1017 __attribute__ ((format(printf, 2, 3))); 1017__printf(2, 3) void b43dbg(struct b43_wl *wl, const char *fmt, ...);
1018void b43warn(struct b43_wl *wl, const char *fmt, ...)
1019 __attribute__ ((format(printf, 2, 3)));
1020void b43dbg(struct b43_wl *wl, const char *fmt, ...)
1021 __attribute__ ((format(printf, 2, 3)));
1022 1018
1023 1019
1024/* A WARN_ON variant that vanishes when b43 debugging is disabled. 1020/* A WARN_ON variant that vanishes when b43 debugging is disabled.
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 7cf4125a1624..5634d9a9965b 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -34,7 +34,7 @@
34 34
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/moduleparam.h> 37#include <linux/module.h>
38#include <linux/if_arp.h> 38#include <linux/if_arp.h>
39#include <linux/etherdevice.h> 39#include <linux/etherdevice.h>
40#include <linux/firmware.h> 40#include <linux/firmware.h>
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 12b6b4067a39..714cad649c45 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/ssb/ssb.h> 26#include <linux/ssb/ssb.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/module.h>
28 29
29#include <pcmcia/cistpl.h> 30#include <pcmcia/cistpl.h>
30#include <pcmcia/ciscode.h> 31#include <pcmcia/ciscode.h>
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b17d9b6c33a5..c8fa2cd97e64 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -228,10 +228,98 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
228static void b43_radio_2056_setup(struct b43_wldev *dev, 228static void b43_radio_2056_setup(struct b43_wldev *dev,
229 const struct b43_nphy_channeltab_entry_rev3 *e) 229 const struct b43_nphy_channeltab_entry_rev3 *e)
230{ 230{
231 struct ssb_sprom *sprom = dev->dev->bus_sprom;
232 enum ieee80211_band band = b43_current_band(dev->wl);
233 u16 offset;
234 u8 i;
235 u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
236
231 B43_WARN_ON(dev->phy.rev < 3); 237 B43_WARN_ON(dev->phy.rev < 3);
232 238
233 b43_chantab_radio_2056_upload(dev, e); 239 b43_chantab_radio_2056_upload(dev, e);
234 /* TODO */ 240 b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
241
242 if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
243 b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
244 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
245 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
246 if (dev->dev->chip_id == 0x4716) {
247 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
248 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
249 } else {
250 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B);
251 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
252 }
253 }
254 if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
255 b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
256 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
257 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
258 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
259 b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
260 }
261
262 if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
263 for (i = 0; i < 2; i++) {
264 offset = i ? B2056_TX1 : B2056_TX0;
265 if (dev->phy.rev >= 5) {
266 b43_radio_write(dev,
267 offset | B2056_TX_PADG_IDAC, 0xcc);
268
269 if (dev->dev->chip_id == 0x4716) {
270 bias = 0x40;
271 cbias = 0x45;
272 pag_boost = 0x5;
273 pgag_boost = 0x33;
274 mixg_boost = 0x55;
275 } else {
276 bias = 0x25;
277 cbias = 0x20;
278 pag_boost = 0x4;
279 pgag_boost = 0x03;
280 mixg_boost = 0x65;
281 }
282 padg_boost = 0x77;
283
284 b43_radio_write(dev,
285 offset | B2056_TX_INTPAG_IMAIN_STAT,
286 bias);
287 b43_radio_write(dev,
288 offset | B2056_TX_INTPAG_IAUX_STAT,
289 bias);
290 b43_radio_write(dev,
291 offset | B2056_TX_INTPAG_CASCBIAS,
292 cbias);
293 b43_radio_write(dev,
294 offset | B2056_TX_INTPAG_BOOST_TUNE,
295 pag_boost);
296 b43_radio_write(dev,
297 offset | B2056_TX_PGAG_BOOST_TUNE,
298 pgag_boost);
299 b43_radio_write(dev,
300 offset | B2056_TX_PADG_BOOST_TUNE,
301 padg_boost);
302 b43_radio_write(dev,
303 offset | B2056_TX_MIXG_BOOST_TUNE,
304 mixg_boost);
305 } else {
306 bias = dev->phy.is_40mhz ? 0x40 : 0x20;
307 b43_radio_write(dev,
308 offset | B2056_TX_INTPAG_IMAIN_STAT,
309 bias);
310 b43_radio_write(dev,
311 offset | B2056_TX_INTPAG_IAUX_STAT,
312 bias);
313 b43_radio_write(dev,
314 offset | B2056_TX_INTPAG_CASCBIAS,
315 0x30);
316 }
317 b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
318 }
319 } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
320 /* TODO */
321 }
322
235 udelay(50); 323 udelay(50);
236 /* VCO calibration */ 324 /* VCO calibration */
237 b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00); 325 b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00);
@@ -387,7 +475,9 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
387 if (nphy->hang_avoid) 475 if (nphy->hang_avoid)
388 b43_nphy_stay_in_carrier_search(dev, 1); 476 b43_nphy_stay_in_carrier_search(dev, 1);
389 477
390 if (dev->phy.rev >= 3) { 478 if (dev->phy.rev >= 7) {
479 txpi[0] = txpi[1] = 30;
480 } else if (dev->phy.rev >= 3) {
391 txpi[0] = 40; 481 txpi[0] = 40;
392 txpi[1] = 40; 482 txpi[1] = 40;
393 } else if (sprom->revision < 4) { 483 } else if (sprom->revision < 4) {
@@ -411,6 +501,9 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
411 txpi[1] = 91; 501 txpi[1] = 91;
412 } 502 }
413 } 503 }
504 if (dev->phy.rev < 7 &&
505 (txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 10))
506 txpi[0] = txpi[1] = 91;
414 507
415 /* 508 /*
416 for (i = 0; i < 2; i++) { 509 for (i = 0; i < 2; i++) {
@@ -421,15 +514,31 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
421 514
422 for (i = 0; i < 2; i++) { 515 for (i = 0; i < 2; i++) {
423 if (dev->phy.rev >= 3) { 516 if (dev->phy.rev >= 3) {
424 /* FIXME: support 5GHz */ 517 if (b43_nphy_ipa(dev)) {
425 txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]]; 518 txgain = *(b43_nphy_get_ipa_gain_table(dev) +
519 txpi[i]);
520 } else if (b43_current_band(dev->wl) ==
521 IEEE80211_BAND_5GHZ) {
522 /* FIXME: use 5GHz tables */
523 txgain =
524 b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
525 } else {
526 if (dev->phy.rev >= 5 &&
527 sprom->fem.ghz5.extpa_gain == 3)
528 ; /* FIXME: 5GHz_txgain_HiPwrEPA */
529 txgain =
530 b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
531 }
426 radio_gain = (txgain >> 16) & 0x1FFFF; 532 radio_gain = (txgain >> 16) & 0x1FFFF;
427 } else { 533 } else {
428 txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]]; 534 txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
429 radio_gain = (txgain >> 16) & 0x1FFF; 535 radio_gain = (txgain >> 16) & 0x1FFF;
430 } 536 }
431 537
432 dac_gain = (txgain >> 8) & 0x3F; 538 if (dev->phy.rev >= 7)
539 dac_gain = (txgain >> 8) & 0x7;
540 else
541 dac_gain = (txgain >> 8) & 0x3F;
433 bbmult = txgain & 0xFF; 542 bbmult = txgain & 0xFF;
434 543
435 if (dev->phy.rev >= 3) { 544 if (dev->phy.rev >= 3) {
@@ -459,7 +568,8 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
459 u32 tmp32; 568 u32 tmp32;
460 u16 reg = (i == 0) ? 569 u16 reg = (i == 0) ?
461 B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1; 570 B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
462 tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, txpi[i])); 571 tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i,
572 576 + txpi[i]));
463 b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4); 573 b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
464 b43_phy_set(dev, reg, 0x4); 574 b43_phy_set(dev, reg, 0x4);
465 } 575 }
@@ -1493,8 +1603,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1493 struct ssb_sprom *sprom = dev->dev->bus_sprom; 1603 struct ssb_sprom *sprom = dev->dev->bus_sprom;
1494 1604
1495 /* TX to RX */ 1605 /* TX to RX */
1496 u8 tx2rx_events[9] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F }; 1606 u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
1497 u8 tx2rx_delays[9] = { 8, 4, 2, 2, 4, 4, 6, 1 }; 1607 u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
1498 /* RX to TX */ 1608 /* RX to TX */
1499 u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3, 1609 u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
1500 0x1F }; 1610 0x1F };
@@ -1505,6 +1615,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1505 u16 tmp16; 1615 u16 tmp16;
1506 u32 tmp32; 1616 u32 tmp32;
1507 1617
1618 b43_phy_write(dev, 0x23f, 0x1f8);
1619 b43_phy_write(dev, 0x240, 0x1f8);
1620
1508 tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0)); 1621 tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
1509 tmp32 &= 0xffffff; 1622 tmp32 &= 0xffffff;
1510 b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32); 1623 b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
@@ -1520,12 +1633,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1520 b43_phy_write(dev, 0x2AE, 0x000C); 1633 b43_phy_write(dev, 0x2AE, 0x000C);
1521 1634
1522 /* TX to RX */ 1635 /* TX to RX */
1523 b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 9); 1636 b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
1637 ARRAY_SIZE(tx2rx_events));
1524 1638
1525 /* RX to TX */ 1639 /* RX to TX */
1526 if (b43_nphy_ipa(dev)) 1640 if (b43_nphy_ipa(dev))
1527 b43_nphy_set_rf_sequence(dev, 1, rx2tx_events_ipa, 1641 b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
1528 rx2tx_delays_ipa, 9); 1642 rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
1529 if (nphy->hw_phyrxchain != 3 && 1643 if (nphy->hw_phyrxchain != 3 &&
1530 nphy->hw_phyrxchain != nphy->hw_phytxchain) { 1644 nphy->hw_phyrxchain != nphy->hw_phytxchain) {
1531 if (b43_nphy_ipa(dev)) { 1645 if (b43_nphy_ipa(dev)) {
@@ -1533,7 +1647,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1533 rx2tx_delays[6] = 1; 1647 rx2tx_delays[6] = 1;
1534 rx2tx_events[7] = 0x1F; 1648 rx2tx_events[7] = 0x1F;
1535 } 1649 }
1536 b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 9); 1650 b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
1651 ARRAY_SIZE(rx2tx_events));
1537 } 1652 }
1538 1653
1539 tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 1654 tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
@@ -1547,8 +1662,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1547 1662
1548 b43_nphy_gain_ctrl_workarounds(dev); 1663 b43_nphy_gain_ctrl_workarounds(dev);
1549 1664
1550 b43_ntab_write(dev, B43_NTAB32(8, 0), 2); 1665 b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
1551 b43_ntab_write(dev, B43_NTAB32(8, 16), 2); 1666 b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
1552 1667
1553 /* TODO */ 1668 /* TODO */
1554 1669
@@ -1560,6 +1675,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1560 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07); 1675 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
1561 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88); 1676 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
1562 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88); 1677 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
1678 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
1679 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
1563 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00); 1680 b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
1564 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00); 1681 b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
1565 1682
@@ -1584,18 +1701,18 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
1584 0x70); 1701 0x70);
1585 } 1702 }
1586 1703
1587 b43_phy_write(dev, 0x224, 0x039C); 1704 b43_phy_write(dev, 0x224, 0x03eb);
1588 b43_phy_write(dev, 0x225, 0x0357); 1705 b43_phy_write(dev, 0x225, 0x03eb);
1589 b43_phy_write(dev, 0x226, 0x0317); 1706 b43_phy_write(dev, 0x226, 0x0341);
1590 b43_phy_write(dev, 0x227, 0x02D7); 1707 b43_phy_write(dev, 0x227, 0x0341);
1591 b43_phy_write(dev, 0x228, 0x039C); 1708 b43_phy_write(dev, 0x228, 0x042b);
1592 b43_phy_write(dev, 0x229, 0x0357); 1709 b43_phy_write(dev, 0x229, 0x042b);
1593 b43_phy_write(dev, 0x22A, 0x0317); 1710 b43_phy_write(dev, 0x22a, 0x0381);
1594 b43_phy_write(dev, 0x22B, 0x02D7); 1711 b43_phy_write(dev, 0x22b, 0x0381);
1595 b43_phy_write(dev, 0x22C, 0x039C); 1712 b43_phy_write(dev, 0x22c, 0x042b);
1596 b43_phy_write(dev, 0x22D, 0x0357); 1713 b43_phy_write(dev, 0x22d, 0x042b);
1597 b43_phy_write(dev, 0x22E, 0x0317); 1714 b43_phy_write(dev, 0x22e, 0x0381);
1598 b43_phy_write(dev, 0x22F, 0x02D7); 1715 b43_phy_write(dev, 0x22f, 0x0381);
1599} 1716}
1600 1717
1601static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) 1718static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -3928,6 +4045,76 @@ int b43_phy_initn(struct b43_wldev *dev)
3928 return 0; 4045 return 0;
3929} 4046}
3930 4047
4048/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
4049static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
4050{
4051 struct bcma_drv_cc *cc;
4052 u32 pmu_ctl;
4053
4054 switch (dev->dev->bus_type) {
4055#ifdef CONFIG_B43_BCMA
4056 case B43_BUS_BCMA:
4057 cc = &dev->dev->bdev->bus->drv_cc;
4058 if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) {
4059 if (avoid) {
4060 bcma_chipco_pll_write(cc, 0x0, 0x11500010);
4061 bcma_chipco_pll_write(cc, 0x1, 0x000C0C06);
4062 bcma_chipco_pll_write(cc, 0x2, 0x0F600a08);
4063 bcma_chipco_pll_write(cc, 0x3, 0x00000000);
4064 bcma_chipco_pll_write(cc, 0x4, 0x2001E920);
4065 bcma_chipco_pll_write(cc, 0x5, 0x88888815);
4066 } else {
4067 bcma_chipco_pll_write(cc, 0x0, 0x11100010);
4068 bcma_chipco_pll_write(cc, 0x1, 0x000c0c06);
4069 bcma_chipco_pll_write(cc, 0x2, 0x03000a08);
4070 bcma_chipco_pll_write(cc, 0x3, 0x00000000);
4071 bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
4072 bcma_chipco_pll_write(cc, 0x5, 0x88888815);
4073 }
4074 pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
4075 } else if (dev->dev->chip_id == 0x4716) {
4076 if (avoid) {
4077 bcma_chipco_pll_write(cc, 0x0, 0x11500060);
4078 bcma_chipco_pll_write(cc, 0x1, 0x080C0C06);
4079 bcma_chipco_pll_write(cc, 0x2, 0x0F600000);
4080 bcma_chipco_pll_write(cc, 0x3, 0x00000000);
4081 bcma_chipco_pll_write(cc, 0x4, 0x2001E924);
4082 bcma_chipco_pll_write(cc, 0x5, 0x88888815);
4083 } else {
4084 bcma_chipco_pll_write(cc, 0x0, 0x11100060);
4085 bcma_chipco_pll_write(cc, 0x1, 0x080c0c06);
4086 bcma_chipco_pll_write(cc, 0x2, 0x03000000);
4087 bcma_chipco_pll_write(cc, 0x3, 0x00000000);
4088 bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
4089 bcma_chipco_pll_write(cc, 0x5, 0x88888815);
4090 }
4091 pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD |
4092 BCMA_CC_PMU_CTL_NOILPONW;
4093 } else if (dev->dev->chip_id == 0x4322 ||
4094 dev->dev->chip_id == 0x4340 ||
4095 dev->dev->chip_id == 0x4341) {
4096 bcma_chipco_pll_write(cc, 0x0, 0x11100070);
4097 bcma_chipco_pll_write(cc, 0x1, 0x1014140a);
4098 bcma_chipco_pll_write(cc, 0x5, 0x88888854);
4099 if (avoid)
4100 bcma_chipco_pll_write(cc, 0x2, 0x05201828);
4101 else
4102 bcma_chipco_pll_write(cc, 0x2, 0x05001828);
4103 pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
4104 } else {
4105 return;
4106 }
4107 bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl);
4108 break;
4109#endif
4110#ifdef CONFIG_B43_SSB
4111 case B43_BUS_SSB:
4112 /* FIXME */
4113 break;
4114#endif
4115 }
4116}
4117
3931/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */ 4118/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
3932static void b43_nphy_channel_setup(struct b43_wldev *dev, 4119static void b43_nphy_channel_setup(struct b43_wldev *dev,
3933 const struct b43_phy_n_sfo_cfg *e, 4120 const struct b43_phy_n_sfo_cfg *e,
@@ -3935,6 +4122,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
3935{ 4122{
3936 struct b43_phy *phy = &dev->phy; 4123 struct b43_phy *phy = &dev->phy;
3937 struct b43_phy_n *nphy = dev->phy.n; 4124 struct b43_phy_n *nphy = dev->phy.n;
4125 int ch = new_channel->hw_value;
3938 4126
3939 u16 old_band_5ghz; 4127 u16 old_band_5ghz;
3940 u32 tmp32; 4128 u32 tmp32;
@@ -3974,8 +4162,41 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
3974 4162
3975 b43_nphy_tx_lp_fbw(dev); 4163 b43_nphy_tx_lp_fbw(dev);
3976 4164
3977 if (dev->phy.rev >= 3 && 0) { 4165 if (dev->phy.rev >= 3 &&
3978 /* TODO */ 4166 dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) {
4167 bool avoid = false;
4168 if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) {
4169 avoid = true;
4170 } else if (!b43_channel_type_is_40mhz(phy->channel_type)) {
4171 if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
4172 avoid = true;
4173 } else { /* 40MHz */
4174 if (nphy->aband_spurwar_en &&
4175 (ch == 38 || ch == 102 || ch == 118))
4176 avoid = dev->dev->chip_id == 0x4716;
4177 }
4178
4179 b43_nphy_pmu_spur_avoid(dev, avoid);
4180
4181 if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 ||
4182 dev->dev->chip_id == 43225) {
4183 b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW,
4184 avoid ? 0x5341 : 0x8889);
4185 b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
4186 }
4187
4188 if (dev->phy.rev == 3 || dev->phy.rev == 4)
4189 ; /* TODO: reset PLL */
4190
4191 if (avoid)
4192 b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX);
4193 else
4194 b43_phy_mask(dev, B43_NPHY_BBCFG,
4195 ~B43_NPHY_BBCFG_RSTRX & 0xFFFF);
4196
4197 b43_nphy_reset_cca(dev);
4198
4199 /* wl sets useless phy_isspuravoid here */
3979 } 4200 }
3980 4201
3981 b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830); 4202 b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
@@ -4055,10 +4276,13 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
4055{ 4276{
4056 struct b43_phy *phy = &dev->phy; 4277 struct b43_phy *phy = &dev->phy;
4057 struct b43_phy_n *nphy = phy->n; 4278 struct b43_phy_n *nphy = phy->n;
4279 struct ssb_sprom *sprom = dev->dev->bus_sprom;
4058 4280
4059 memset(nphy, 0, sizeof(*nphy)); 4281 memset(nphy, 0, sizeof(*nphy));
4060 4282
4061 nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4); 4283 nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
4284 nphy->spur_avoid = (phy->rev >= 3) ?
4285 B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
4062 nphy->gain_boost = true; /* this way we follow wl, assume it is true */ 4286 nphy->gain_boost = true; /* this way we follow wl, assume it is true */
4063 nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */ 4287 nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
4064 nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */ 4288 nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4067,6 +4291,38 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
4067 * 0x7f == 127 and we check for 128 when restoring TX pwr ctl. */ 4291 * 0x7f == 127 and we check for 128 when restoring TX pwr ctl. */
4068 nphy->tx_pwr_idx[0] = 128; 4292 nphy->tx_pwr_idx[0] = 128;
4069 nphy->tx_pwr_idx[1] = 128; 4293 nphy->tx_pwr_idx[1] = 128;
4294
4295 /* Hardware TX power control and 5GHz power gain */
4296 nphy->txpwrctrl = false;
4297 nphy->pwg_gain_5ghz = false;
4298 if (dev->phy.rev >= 3 ||
4299 (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
4300 (dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) {
4301 nphy->txpwrctrl = true;
4302 nphy->pwg_gain_5ghz = true;
4303 } else if (sprom->revision >= 4) {
4304 if (dev->phy.rev >= 2 &&
4305 (sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) {
4306 nphy->txpwrctrl = true;
4307#ifdef CONFIG_B43_SSB
4308 if (dev->dev->bus_type == B43_BUS_SSB &&
4309 dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) {
4310 struct pci_dev *pdev =
4311 dev->dev->sdev->bus->host_pci;
4312 if (pdev->device == 0x4328 ||
4313 pdev->device == 0x432a)
4314 nphy->pwg_gain_5ghz = true;
4315 }
4316#endif
4317 } else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) {
4318 nphy->pwg_gain_5ghz = true;
4319 }
4320 }
4321
4322 if (dev->phy.rev >= 3) {
4323 nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
4324 nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
4325 }
4070} 4326}
4071 4327
4072static void b43_nphy_op_free(struct b43_wldev *dev) 4328static void b43_nphy_op_free(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index fbf520285bd1..56ef97b5b815 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -716,6 +716,12 @@
716 716
717struct b43_wldev; 717struct b43_wldev;
718 718
719enum b43_nphy_spur_avoid {
720 B43_SPUR_AVOID_DISABLE,
721 B43_SPUR_AVOID_AUTO,
722 B43_SPUR_AVOID_FORCE,
723};
724
719struct b43_chanspec { 725struct b43_chanspec {
720 u16 center_freq; 726 u16 center_freq;
721 enum nl80211_channel_type channel_type; 727 enum nl80211_channel_type channel_type;
@@ -785,6 +791,7 @@ struct b43_phy_n {
785 u16 mphase_txcal_bestcoeffs[11]; 791 u16 mphase_txcal_bestcoeffs[11];
786 792
787 bool txpwrctrl; 793 bool txpwrctrl;
794 bool pwg_gain_5ghz;
788 u8 tx_pwr_idx[2]; 795 u8 tx_pwr_idx[2];
789 u16 adj_pwr_tbl[84]; 796 u16 adj_pwr_tbl[84];
790 u16 txcal_bbmult; 797 u16 txcal_bbmult;
@@ -803,6 +810,7 @@ struct b43_phy_n {
803 u16 classifier_state; 810 u16 classifier_state;
804 u16 clip_state[2]; 811 u16 clip_state[2];
805 812
813 enum b43_nphy_spur_avoid spur_avoid;
806 bool aband_spurwar_en; 814 bool aband_spurwar_en;
807 bool gband_spurwar_en; 815 bool gband_spurwar_en;
808 816
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index a01f776ca4de..ce037fb6789a 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -1572,14 +1572,14 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
1572 [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, }, 1572 [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
1573 [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, }, 1573 [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1574 [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, }, 1574 [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1575 [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, }, 1575 [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
1576 [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, }, 1576 [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
1577 [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, }, 1577 [B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
1578 [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, }, 1578 [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
1579 [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, }, 1579 [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
1580 [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, }, 1580 [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
1581 [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, }, 1581 [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
1582 [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, 1582 [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
1583 [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, }, 1583 [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1584 [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, }, 1584 [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
1585 [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, }, 1585 [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
@@ -9055,6 +9055,21 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
9055 B2056_RX1, pts->rx, pts->rx_length); 9055 B2056_RX1, pts->rx, pts->rx_length);
9056} 9056}
9057 9057
9058void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
9059{
9060 struct b2056_inittabs_pts *pts;
9061 const struct b2056_inittab_entry *e;
9062
9063 if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
9064 B43_WARN_ON(1);
9065 return;
9066 }
9067 pts = &b2056_inittabs[dev->phy.rev];
9068 e = &pts->syn[B2056_SYN_PLL_CP2];
9069
9070 b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
9071}
9072
9058const struct b43_nphy_channeltab_entry_rev3 * 9073const struct b43_nphy_channeltab_entry_rev3 *
9059b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq) 9074b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
9060{ 9075{
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
index a7159d8578be..5b86673459fa 100644
--- a/drivers/net/wireless/b43/radio_2056.h
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -1090,6 +1090,7 @@ struct b43_nphy_channeltab_entry_rev3 {
1090 1090
1091void b2056_upload_inittabs(struct b43_wldev *dev, 1091void b2056_upload_inittabs(struct b43_wldev *dev,
1092 bool ghz5, bool ignore_uploadflag); 1092 bool ghz5, bool ignore_uploadflag);
1093void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5);
1093 1094
1094/* Get the NPHY Channel Switch Table entry for a channel. 1095/* Get the NPHY Channel Switch Table entry for a channel.
1095 * Returns NULL on failure to find an entry. */ 1096 * Returns NULL on failure to find an entry. */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 7b326f2efdc9..3252560e9fa1 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2171,6 +2171,48 @@ static const u16 b43_ntab_loftlt1_r3[] = {
2171 0x0000, 0x0000, 2171 0x0000, 0x0000,
2172}; 2172};
2173 2173
2174/* volatile tables, PHY revision >= 3 */
2175
2176/* indexed by antswctl2g */
2177static const u16 b43_ntab_antswctl2g_r3[4][32] = {
2178 {
2179 0x0082, 0x0082, 0x0211, 0x0222, 0x0328,
2180 0x0000, 0x0000, 0x0000, 0x0144, 0x0000,
2181 0x0000, 0x0000, 0x0188, 0x0000, 0x0000,
2182 0x0000, 0x0082, 0x0082, 0x0211, 0x0222,
2183 0x0328, 0x0000, 0x0000, 0x0000, 0x0144,
2184 0x0000, 0x0000, 0x0000, 0x0188, 0x0000,
2185 0x0000, 0x0000,
2186 },
2187 {
2188 0x0022, 0x0022, 0x0011, 0x0022, 0x0022,
2189 0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
2190 0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
2191 0x0000, 0x0022, 0x0022, 0x0011, 0x0022,
2192 0x0022, 0x0000, 0x0000, 0x0000, 0x0011,
2193 0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
2194 0x0000, 0x0000,
2195 },
2196 {
2197 0x0088, 0x0088, 0x0044, 0x0088, 0x0088,
2198 0x0000, 0x0000, 0x0000, 0x0044, 0x0000,
2199 0x0000, 0x0000, 0x0088, 0x0000, 0x0000,
2200 0x0000, 0x0088, 0x0088, 0x0044, 0x0088,
2201 0x0088, 0x0000, 0x0000, 0x0000, 0x0044,
2202 0x0000, 0x0000, 0x0000, 0x0088, 0x0000,
2203 0x0000, 0x0000,
2204 },
2205 {
2206 0x0022, 0x0022, 0x0011, 0x0022, 0x0000,
2207 0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
2208 0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
2209 0x03cc, 0x0022, 0x0022, 0x0011, 0x0022,
2210 0x0000, 0x0000, 0x0000, 0x0000, 0x0011,
2211 0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
2212 0x0000, 0x03cc,
2213 }
2214};
2215
2174/* TX gain tables */ 2216/* TX gain tables */
2175const u32 b43_ntab_tx_gain_rev0_1_2[] = { 2217const u32 b43_ntab_tx_gain_rev0_1_2[] = {
2176 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42, 2218 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
@@ -2652,7 +2694,7 @@ const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
2652const s16 tbl_tx_filter_coef_rev4[7][15] = { 2694const s16 tbl_tx_filter_coef_rev4[7][15] = {
2653 { -377, 137, -407, 208, -1527, 2695 { -377, 137, -407, 208, -1527,
2654 956, 93, 186, 93, 230, 2696 956, 93, 186, 93, 230,
2655 -44, 230, 20, -191, 201 }, 2697 -44, 230, 201, -191, 201 },
2656 { -77, 20, -98, 49, -93, 2698 { -77, 20, -98, 49, -93,
2657 60, 56, 111, 56, 26, 2699 60, 56, 111, 56, 26,
2658 -5, 26, 34, -32, 34 }, 2700 -5, 26, 34, -32, 34 },
@@ -2838,9 +2880,8 @@ u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
2838 break; 2880 break;
2839 case B43_NTAB_32BIT: 2881 case B43_NTAB_32BIT:
2840 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); 2882 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
2841 value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI); 2883 value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
2842 value <<= 16; 2884 value |= b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
2843 value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
2844 break; 2885 break;
2845 default: 2886 default:
2846 B43_WARN_ON(1); 2887 B43_WARN_ON(1);
@@ -2864,6 +2905,12 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
2864 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); 2905 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
2865 2906
2866 for (i = 0; i < nr_elements; i++) { 2907 for (i = 0; i < nr_elements; i++) {
2908 /* Auto increment broken + caching issue on BCM43224? */
2909 if (dev->dev->chip_id == 43224 && dev->dev->chip_rev == 1) {
2910 b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
2911 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
2912 }
2913
2867 switch (type) { 2914 switch (type) {
2868 case B43_NTAB_8BIT: 2915 case B43_NTAB_8BIT:
2869 *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF; 2916 *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
@@ -2874,9 +2921,10 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
2874 data += 2; 2921 data += 2;
2875 break; 2922 break;
2876 case B43_NTAB_32BIT: 2923 case B43_NTAB_32BIT:
2877 *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI); 2924 *((u32 *)data) =
2878 *((u32 *)data) <<= 16; 2925 b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
2879 *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO); 2926 *((u32 *)data) |=
2927 b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
2880 data += 4; 2928 data += 4;
2881 break; 2929 break;
2882 default: 2930 default:
@@ -2932,6 +2980,13 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
2932 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); 2980 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
2933 2981
2934 for (i = 0; i < nr_elements; i++) { 2982 for (i = 0; i < nr_elements; i++) {
2983 /* Auto increment broken + caching issue on BCM43224? */
2984 if ((offset >> 10) == 9 && dev->dev->chip_id == 43224 &&
2985 dev->dev->chip_rev == 1) {
2986 b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
2987 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
2988 }
2989
2935 switch (type) { 2990 switch (type) {
2936 case B43_NTAB_8BIT: 2991 case B43_NTAB_8BIT:
2937 value = *data; 2992 value = *data;
@@ -2999,6 +3054,8 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
2999 } while (0) 3054 } while (0)
3000void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev) 3055void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
3001{ 3056{
3057 struct ssb_sprom *sprom = dev->dev->bus_sprom;
3058
3002 /* Static tables */ 3059 /* Static tables */
3003 ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3); 3060 ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
3004 ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3); 3061 ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
@@ -3029,7 +3086,11 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
3029 ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3); 3086 ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
3030 3087
3031 /* Volatile tables */ 3088 /* Volatile tables */
3032 /* TODO */ 3089 if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3))
3090 ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3,
3091 b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]);
3092 else
3093 B43_WARN_ON(1);
3033} 3094}
3034 3095
3035struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( 3096struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index a81696bff0ed..97038c481930 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -126,26 +126,29 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
126#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */ 126#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
127#define B43_NTAB_C1_LOFEEDTH_SIZE 128 127#define B43_NTAB_C1_LOFEEDTH_SIZE 128
128 128
129/* Volatile N-PHY tables, PHY revision >= 3 */
130#define B43_NTAB_ANT_SW_CTL_R3 B43_NTAB16( 9, 0) /* antenna software control */
131
129/* Static N-PHY tables, PHY revision >= 3 */ 132/* Static N-PHY tables, PHY revision >= 3 */
130#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */ 133#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 0) /* frame struct */
131#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */ 134#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 0) /* pilot */
132#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */ 135#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */
133#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */ 136#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */
134#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */ 137#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */
135#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */ 138#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */
136#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */ 139#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
137#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */ 140#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */
138#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */ 141#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
139#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */ 142#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
140#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */ 143#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */
141#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */ 144#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */
142#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */ 145#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 0) /* PLT lookup */
143#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */ 146#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 0) /* channel estimate */
144#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */ 147#define B43_NTAB_FRAMELT_R3 B43_NTAB8(24, 0) /* frame lookup */
145#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */ 148#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8(26, 0) /* estimated power lookup 0 */
146#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */ 149#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8(27, 0) /* estimated power lookup 1 */
147#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */ 150#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8(26, 64) /* adjusted power lookup 0 */
148#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */ 151#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8(27, 64) /* adjusted power lookup 1 */
149#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */ 152#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */
150#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */ 153#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
151#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */ 154#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 12b518251581..1d4fc9db7f5e 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -810,15 +810,15 @@ struct b43legacy_lopair *b43legacy_get_lopair(struct b43legacy_phy *phy,
810 810
811 811
812/* Message printing */ 812/* Message printing */
813void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...) 813__printf(2, 3)
814 __attribute__((format(printf, 2, 3))); 814void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...);
815void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...) 815__printf(2, 3)
816 __attribute__((format(printf, 2, 3))); 816void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...);
817void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...) 817__printf(2, 3)
818 __attribute__((format(printf, 2, 3))); 818void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...);
819#if B43legacy_DEBUG 819#if B43legacy_DEBUG
820void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...) 820__printf(2, 3)
821 __attribute__((format(printf, 2, 3))); 821void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...);
822#else /* DEBUG */ 822#else /* DEBUG */
823# define b43legacydbg(wl, fmt...) do { /* nothing */ } while (0) 823# define b43legacydbg(wl, fmt...) do { /* nothing */ } while (0)
824#endif /* DEBUG */ 824#endif /* DEBUG */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index a3b72cd72c66..20f02437af8c 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -31,7 +31,7 @@
31 31
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/moduleparam.h> 34#include <linux/module.h>
35#include <linux/if_arp.h> 35#include <linux/if_arp.h>
36#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
37#include <linux/firmware.h> 37#include <linux/firmware.h>
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 2069fc8f7ad1..8f54c2eb6824 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -3,9 +3,8 @@ config BRCMUTIL
3 3
4config BRCMSMAC 4config BRCMSMAC
5 tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver" 5 tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
6 depends on PCI
7 depends on MAC80211 6 depends on MAC80211
8 depends on BCMA=n 7 depends on BCMA
9 select BRCMUTIL 8 select BRCMUTIL
10 select FW_LOADER 9 select FW_LOADER
11 select CRC_CCITT 10 select CRC_CCITT
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h b/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
deleted file mode 100644
index cecb5e5f412b..000000000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * Copyright (c) 2011 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _bcmchip_h_
18#define _bcmchip_h_
19
20/* bcm4329 */
21/* firmware name */
22#define BCM4329_FW_NAME "brcm/bcm4329-fullmac-4.bin"
23#define BCM4329_NV_NAME "brcm/bcm4329-fullmac-4.txt"
24
25#endif /* _bcmchip_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index bff9dcd6fadc..6c85d668c9d7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -17,6 +17,7 @@
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/export.h>
20#include <linux/pci.h> 21#include <linux/pci.h>
21#include <linux/pci_ids.h> 22#include <linux/pci_ids.h>
22#include <linux/sched.h> 23#include <linux/sched.h>
@@ -221,19 +222,12 @@ bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
221 return sdiodev->regfail; 222 return sdiodev->regfail;
222} 223}
223 224
224int 225static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
225brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 226 uint flags, uint width, u32 *addr)
226 uint flags,
227 u8 *buf, uint nbytes, struct sk_buff *pkt)
228{ 227{
229 int status; 228 uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
230 uint incr_fix;
231 uint width;
232 uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
233 int err = 0; 229 int err = 0;
234 230
235 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
236
237 /* Async not implemented yet */ 231 /* Async not implemented yet */
238 if (flags & SDIO_REQ_ASYNC) 232 if (flags & SDIO_REQ_ASYNC)
239 return -ENOTSUPP; 233 return -ENOTSUPP;
@@ -246,29 +240,114 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
246 sdiodev->sbwad = bar0; 240 sdiodev->sbwad = bar0;
247 } 241 }
248 242
249 addr &= SBSDIO_SB_OFT_ADDR_MASK; 243 *addr &= SBSDIO_SB_OFT_ADDR_MASK;
244
245 if (width == 4)
246 *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
247
248 return 0;
249}
250
251int
252brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
253 uint flags, u8 *buf, uint nbytes)
254{
255 struct sk_buff *mypkt;
256 int err;
257
258 mypkt = brcmu_pkt_buf_get_skb(nbytes);
259 if (!mypkt) {
260 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
261 nbytes);
262 return -EIO;
263 }
264
265 err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
266 if (!err)
267 memcpy(buf, mypkt->data, nbytes);
268
269 brcmu_pkt_buf_free_skb(mypkt);
270 return err;
271}
272
273int
274brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
275 uint flags, struct sk_buff *pkt)
276{
277 uint incr_fix;
278 uint width;
279 int err = 0;
280
281 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
282 fn, addr, pkt->len);
283
284 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
285 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
286 if (err)
287 return err;
250 288
251 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; 289 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
290 err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
291 fn, addr, pkt);
292
293 return err;
294}
295
296int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
297 uint flags, struct sk_buff_head *pktq)
298{
299 uint incr_fix;
300 uint width;
301 int err = 0;
302
303 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
304 fn, addr, pktq->qlen);
305
252 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 306 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
253 if (width == 4) 307 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
254 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; 308 if (err)
309 return err;
255 310
256 status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ, 311 incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
257 fn, addr, width, nbytes, buf, pkt); 312 err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
313 pktq);
258 314
259 return status; 315 return err;
260} 316}
261 317
262int 318int
263brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 319brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
264 uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt) 320 uint flags, u8 *buf, uint nbytes)
321{
322 struct sk_buff *mypkt;
323 int err;
324
325 mypkt = brcmu_pkt_buf_get_skb(nbytes);
326 if (!mypkt) {
327 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
328 nbytes);
329 return -EIO;
330 }
331
332 memcpy(mypkt->data, buf, nbytes);
333 err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
334
335 brcmu_pkt_buf_free_skb(mypkt);
336 return err;
337
338}
339
340int
341brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
342 uint flags, struct sk_buff *pkt)
265{ 343{
266 uint incr_fix; 344 uint incr_fix;
267 uint width; 345 uint width;
268 uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; 346 uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
269 int err = 0; 347 int err = 0;
270 348
271 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes); 349 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
350 fn, addr, pkt->len);
272 351
273 /* Async not implemented yet */ 352 /* Async not implemented yet */
274 if (flags & SDIO_REQ_ASYNC) 353 if (flags & SDIO_REQ_ASYNC)
@@ -290,18 +369,39 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
290 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; 369 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
291 370
292 return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn, 371 return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
293 addr, width, nbytes, buf, pkt); 372 addr, pkt);
294} 373}
295 374
296int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr, 375int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
297 u8 *buf, uint nbytes) 376 u8 *buf, uint nbytes)
298{ 377{
378 struct sk_buff *mypkt;
379 bool write = rw ? SDIOH_WRITE : SDIOH_READ;
380 int err;
381
299 addr &= SBSDIO_SB_OFT_ADDR_MASK; 382 addr &= SBSDIO_SB_OFT_ADDR_MASK;
300 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; 383 addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
301 384
302 return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, 385 mypkt = brcmu_pkt_buf_get_skb(nbytes);
303 (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1, 386 if (!mypkt) {
304 addr, 4, nbytes, buf, NULL); 387 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
388 nbytes);
389 return -EIO;
390 }
391
392 /* For a write, copy the buffer data into the packet. */
393 if (write)
394 memcpy(mypkt->data, buf, nbytes);
395
396 err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write,
397 SDIO_FUNC_1, addr, mypkt);
398
399 /* For a read, copy the packet data back to the buffer. */
400 if (!err && !write)
401 memcpy(buf, mypkt->data, nbytes);
402
403 brcmu_pkt_buf_free_skb(mypkt);
404 return err;
305} 405}
306 406
307int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn) 407int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
@@ -332,7 +432,7 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
332 sdiodev->sbwad = SI_ENUM_BASE; 432 sdiodev->sbwad = SI_ENUM_BASE;
333 433
334 /* try to attach to the target device */ 434 /* try to attach to the target device */
335 sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev); 435 sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
336 if (!sdiodev->bus) { 436 if (!sdiodev->bus) {
337 brcmf_dbg(ERROR, "device attach failed\n"); 437 brcmf_dbg(ERROR, "device attach failed\n");
338 ret = -ENODEV; 438 ret = -ENODEV;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index bbaeb2d5c93a..b895f198a950 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -40,6 +40,7 @@
40#define DMA_ALIGN_MASK 0x03 40#define DMA_ALIGN_MASK 0x03
41 41
42#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 42#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
43#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
43 44
44#define SDIO_FUNC1_BLOCKSIZE 64 45#define SDIO_FUNC1_BLOCKSIZE 64
45#define SDIO_FUNC2_BLOCKSIZE 512 46#define SDIO_FUNC2_BLOCKSIZE 512
@@ -47,6 +48,7 @@
47/* devices we support, null terminated */ 48/* devices we support, null terminated */
48static const struct sdio_device_id brcmf_sdmmc_ids[] = { 49static const struct sdio_device_id brcmf_sdmmc_ids[] = {
49 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, 50 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
51 {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
50 { /* end: all zeroes */ }, 52 { /* end: all zeroes */ },
51}; 53};
52MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); 54MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -204,62 +206,75 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
204 return err_ret; 206 return err_ret;
205} 207}
206 208
209/* precondition: host controller is claimed */
207static int 210static int
208brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc, 211brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
209 uint write, uint func, uint addr, 212 uint func, uint addr, struct sk_buff *pkt, uint pktlen)
210 struct sk_buff *pkt) 213{
214 int err_ret = 0;
215
216 if ((write) && (!fifo)) {
217 err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
218 ((u8 *) (pkt->data)), pktlen);
219 } else if (write) {
220 err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
221 ((u8 *) (pkt->data)), pktlen);
222 } else if (fifo) {
223 err_ret = sdio_readsb(sdiodev->func[func],
224 ((u8 *) (pkt->data)), addr, pktlen);
225 } else {
226 err_ret = sdio_memcpy_fromio(sdiodev->func[func],
227 ((u8 *) (pkt->data)),
228 addr, pktlen);
229 }
230
231 return err_ret;
232}
233
234/*
235 * This function takes a queue of packets. The packets on the queue
236 * are assumed to be properly aligned by the caller.
237 */
238int
239brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
240 uint write, uint func, uint addr,
241 struct sk_buff_head *pktq)
211{ 242{
212 bool fifo = (fix_inc == SDIOH_DATA_FIX); 243 bool fifo = (fix_inc == SDIOH_DATA_FIX);
213 u32 SGCount = 0; 244 u32 SGCount = 0;
214 int err_ret = 0; 245 int err_ret = 0;
215 246
216 struct sk_buff *pnext; 247 struct sk_buff *pkt;
217 248
218 brcmf_dbg(TRACE, "Enter\n"); 249 brcmf_dbg(TRACE, "Enter\n");
219 250
220 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait); 251 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
221 if (brcmf_pm_resume_error(sdiodev)) 252 if (brcmf_pm_resume_error(sdiodev))
222 return -EIO; 253 return -EIO;
223 254
224 /* Claim host controller */ 255 /* Claim host controller */
225 sdio_claim_host(sdiodev->func[func]); 256 sdio_claim_host(sdiodev->func[func]);
226 for (pnext = pkt; pnext; pnext = pnext->next) { 257
227 uint pkt_len = pnext->len; 258 skb_queue_walk(pktq, pkt) {
259 uint pkt_len = pkt->len;
228 pkt_len += 3; 260 pkt_len += 3;
229 pkt_len &= 0xFFFFFFFC; 261 pkt_len &= 0xFFFFFFFC;
230 262
231 if ((write) && (!fifo)) { 263 err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
232 err_ret = sdio_memcpy_toio(sdiodev->func[func], addr, 264 addr, pkt, pkt_len);
233 ((u8 *) (pnext->data)),
234 pkt_len);
235 } else if (write) {
236 err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
237 ((u8 *) (pnext->data)),
238 pkt_len);
239 } else if (fifo) {
240 err_ret = sdio_readsb(sdiodev->func[func],
241 ((u8 *) (pnext->data)),
242 addr, pkt_len);
243 } else {
244 err_ret = sdio_memcpy_fromio(sdiodev->func[func],
245 ((u8 *) (pnext->data)),
246 addr, pkt_len);
247 }
248
249 if (err_ret) { 265 if (err_ret) {
250 brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n", 266 brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
251 write ? "TX" : "RX", pnext, SGCount, addr, 267 write ? "TX" : "RX", pkt, SGCount, addr,
252 pkt_len, err_ret); 268 pkt_len, err_ret);
253 } else { 269 } else {
254 brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n", 270 brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
255 write ? "TX" : "RX", pnext, SGCount, addr, 271 write ? "TX" : "RX", pkt, SGCount, addr,
256 pkt_len); 272 pkt_len);
257 } 273 }
258
259 if (!fifo) 274 if (!fifo)
260 addr += pkt_len; 275 addr += pkt_len;
261 SGCount++;
262 276
277 SGCount++;
263 } 278 }
264 279
265 /* Release host controller */ 280 /* Release host controller */
@@ -270,91 +285,45 @@ brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
270} 285}
271 286
272/* 287/*
273 * This function takes a buffer or packet, and fixes everything up 288 * This function takes a single DMA-able packet.
274 * so that in the end, a DMA-able packet is created.
275 *
276 * A buffer does not have an associated packet pointer,
277 * and may or may not be aligned.
278 * A packet may consist of a single packet, or a packet chain.
279 * If it is a packet chain, then all the packets in the chain
280 * must be properly aligned.
281 *
282 * If the packet data is not aligned, then there may only be
283 * one packet, and in this case, it is copied to a new
284 * aligned packet.
285 *
286 */ 289 */
287int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev, 290int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
288 uint fix_inc, uint write, uint func, uint addr, 291 uint fix_inc, uint write, uint func, uint addr,
289 uint reg_width, uint buflen_u, u8 *buffer,
290 struct sk_buff *pkt) 292 struct sk_buff *pkt)
291{ 293{
292 int Status; 294 int status;
293 struct sk_buff *mypkt = NULL; 295 uint pkt_len = pkt->len;
296 bool fifo = (fix_inc == SDIOH_DATA_FIX);
294 297
295 brcmf_dbg(TRACE, "Enter\n"); 298 brcmf_dbg(TRACE, "Enter\n");
296 299
300 if (pkt == NULL)
301 return -EINVAL;
302
297 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait); 303 brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
298 if (brcmf_pm_resume_error(sdiodev)) 304 if (brcmf_pm_resume_error(sdiodev))
299 return -EIO; 305 return -EIO;
300 /* Case 1: we don't have a packet. */
301 if (pkt == NULL) {
302 brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
303 write ? "TX" : "RX", buflen_u);
304 mypkt = brcmu_pkt_buf_get_skb(buflen_u);
305 if (!mypkt) {
306 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
307 buflen_u);
308 return -EIO;
309 }
310
311 /* For a write, copy the buffer data into the packet. */
312 if (write)
313 memcpy(mypkt->data, buffer, buflen_u);
314
315 Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
316 func, addr, mypkt);
317
318 /* For a read, copy the packet data back to the buffer. */
319 if (!write)
320 memcpy(buffer, mypkt->data, buflen_u);
321
322 brcmu_pkt_buf_free_skb(mypkt);
323 } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
324 /*
325 * Case 2: We have a packet, but it is unaligned.
326 * In this case, we cannot have a chain (pkt->next == NULL)
327 */
328 brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
329 write ? "TX" : "RX", pkt->len);
330 mypkt = brcmu_pkt_buf_get_skb(pkt->len);
331 if (!mypkt) {
332 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
333 pkt->len);
334 return -EIO;
335 }
336 306
337 /* For a write, copy the buffer data into the packet. */ 307 /* Claim host controller */
338 if (write) 308 sdio_claim_host(sdiodev->func[func]);
339 memcpy(mypkt->data, pkt->data, pkt->len);
340
341 Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
342 func, addr, mypkt);
343 309
344 /* For a read, copy the packet data back to the buffer. */ 310 pkt_len += 3;
345 if (!write) 311 pkt_len &= (uint)~3;
346 memcpy(pkt->data, mypkt->data, mypkt->len);
347 312
348 brcmu_pkt_buf_free_skb(mypkt); 313 status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
349 } else { /* case 3: We have a packet and 314 addr, pkt, pkt_len);
350 it is aligned. */ 315 if (status) {
351 brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n", 316 brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
352 write ? "Tx" : "Rx"); 317 write ? "TX" : "RX", pkt, addr, pkt_len, status);
353 Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write, 318 } else {
354 func, addr, pkt); 319 brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
320 write ? "TX" : "RX", pkt, addr, pkt_len);
355 } 321 }
356 322
357 return Status; 323 /* Release host controller */
324 sdio_release_host(sdiodev->func[func]);
325
326 return status;
358} 327}
359 328
360/* Read client card reg */ 329/* Read client card reg */
@@ -494,6 +463,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
494{ 463{
495 int ret = 0; 464 int ret = 0;
496 struct brcmf_sdio_dev *sdiodev; 465 struct brcmf_sdio_dev *sdiodev;
466 struct brcmf_bus *bus_if;
497 brcmf_dbg(TRACE, "Enter\n"); 467 brcmf_dbg(TRACE, "Enter\n");
498 brcmf_dbg(TRACE, "func->class=%x\n", func->class); 468 brcmf_dbg(TRACE, "func->class=%x\n", func->class);
499 brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor); 469 brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
@@ -505,17 +475,25 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
505 brcmf_dbg(ERROR, "card private drvdata occupied\n"); 475 brcmf_dbg(ERROR, "card private drvdata occupied\n");
506 return -ENXIO; 476 return -ENXIO;
507 } 477 }
478 bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
479 if (!bus_if)
480 return -ENOMEM;
508 sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL); 481 sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
509 if (!sdiodev) 482 if (!sdiodev) {
483 kfree(bus_if);
510 return -ENOMEM; 484 return -ENOMEM;
485 }
511 sdiodev->func[0] = func->card->sdio_func[0]; 486 sdiodev->func[0] = func->card->sdio_func[0];
512 sdiodev->func[1] = func; 487 sdiodev->func[1] = func;
488 sdiodev->bus_if = bus_if;
489 bus_if->bus_priv = sdiodev;
490 bus_if->type = SDIO_BUS;
513 dev_set_drvdata(&func->card->dev, sdiodev); 491 dev_set_drvdata(&func->card->dev, sdiodev);
514 492
515 atomic_set(&sdiodev->suspend, false); 493 atomic_set(&sdiodev->suspend, false);
516 init_waitqueue_head(&sdiodev->request_byte_wait); 494 init_waitqueue_head(&sdiodev->request_byte_wait);
517 init_waitqueue_head(&sdiodev->request_word_wait); 495 init_waitqueue_head(&sdiodev->request_word_wait);
518 init_waitqueue_head(&sdiodev->request_packet_wait); 496 init_waitqueue_head(&sdiodev->request_chain_wait);
519 init_waitqueue_head(&sdiodev->request_buffer_wait); 497 init_waitqueue_head(&sdiodev->request_buffer_wait);
520 } 498 }
521 499
@@ -525,6 +503,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
525 return -ENODEV; 503 return -ENODEV;
526 sdiodev->func[2] = func; 504 sdiodev->func[2] = func;
527 505
506 bus_if = sdiodev->bus_if;
507 sdiodev->dev = &func->dev;
508 dev_set_drvdata(&func->dev, bus_if);
509
528 brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n"); 510 brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
529 ret = brcmf_sdio_probe(sdiodev); 511 ret = brcmf_sdio_probe(sdiodev);
530 } 512 }
@@ -534,6 +516,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
534 516
535static void brcmf_ops_sdio_remove(struct sdio_func *func) 517static void brcmf_ops_sdio_remove(struct sdio_func *func)
536{ 518{
519 struct brcmf_bus *bus_if;
537 struct brcmf_sdio_dev *sdiodev; 520 struct brcmf_sdio_dev *sdiodev;
538 brcmf_dbg(TRACE, "Enter\n"); 521 brcmf_dbg(TRACE, "Enter\n");
539 brcmf_dbg(INFO, "func->class=%x\n", func->class); 522 brcmf_dbg(INFO, "func->class=%x\n", func->class);
@@ -542,10 +525,13 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
542 brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num); 525 brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
543 526
544 if (func->num == 2) { 527 if (func->num == 2) {
545 sdiodev = dev_get_drvdata(&func->card->dev); 528 bus_if = dev_get_drvdata(&func->dev);
529 sdiodev = bus_if->bus_priv;
546 brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n"); 530 brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
547 brcmf_sdio_remove(sdiodev); 531 brcmf_sdio_remove(sdiodev);
548 dev_set_drvdata(&func->card->dev, NULL); 532 dev_set_drvdata(&func->card->dev, NULL);
533 dev_set_drvdata(&func->dev, NULL);
534 kfree(bus_if);
549 kfree(sdiodev); 535 kfree(sdiodev);
550 } 536 }
551} 537}
@@ -554,14 +540,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
554static int brcmf_sdio_suspend(struct device *dev) 540static int brcmf_sdio_suspend(struct device *dev)
555{ 541{
556 mmc_pm_flag_t sdio_flags; 542 mmc_pm_flag_t sdio_flags;
557 struct brcmf_sdio_dev *sdiodev;
558 struct sdio_func *func = dev_to_sdio_func(dev); 543 struct sdio_func *func = dev_to_sdio_func(dev);
544 struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
559 int ret = 0; 545 int ret = 0;
560 546
561 brcmf_dbg(TRACE, "\n"); 547 brcmf_dbg(TRACE, "\n");
562 548
563 sdiodev = dev_get_drvdata(&func->card->dev);
564
565 atomic_set(&sdiodev->suspend, true); 549 atomic_set(&sdiodev->suspend, true);
566 550
567 sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]); 551 sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
@@ -583,10 +567,9 @@ static int brcmf_sdio_suspend(struct device *dev)
583 567
584static int brcmf_sdio_resume(struct device *dev) 568static int brcmf_sdio_resume(struct device *dev)
585{ 569{
586 struct brcmf_sdio_dev *sdiodev;
587 struct sdio_func *func = dev_to_sdio_func(dev); 570 struct sdio_func *func = dev_to_sdio_func(dev);
571 struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
588 572
589 sdiodev = dev_get_drvdata(&func->card->dev);
590 brcmf_sdio_wdtmr_enable(sdiodev, true); 573 brcmf_sdio_wdtmr_enable(sdiodev, true);
591 atomic_set(&sdiodev->suspend, false); 574 atomic_set(&sdiodev->suspend, false);
592 return 0; 575 return 0;
@@ -610,17 +593,26 @@ static struct sdio_driver brcmf_sdmmc_driver = {
610#endif /* CONFIG_PM_SLEEP */ 593#endif /* CONFIG_PM_SLEEP */
611}; 594};
612 595
613/* bus register interface */ 596static void __exit brcmf_sdio_exit(void)
614int brcmf_bus_register(void)
615{ 597{
616 brcmf_dbg(TRACE, "Enter\n"); 598 brcmf_dbg(TRACE, "Enter\n");
617 599
618 return sdio_register_driver(&brcmf_sdmmc_driver); 600 sdio_unregister_driver(&brcmf_sdmmc_driver);
619} 601}
620 602
621void brcmf_bus_unregister(void) 603static int __init brcmf_sdio_init(void)
622{ 604{
605 int ret;
606
623 brcmf_dbg(TRACE, "Enter\n"); 607 brcmf_dbg(TRACE, "Enter\n");
624 608
625 sdio_unregister_driver(&brcmf_sdmmc_driver); 609 ret = sdio_register_driver(&brcmf_sdmmc_driver);
610
611 if (ret)
612 brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
613
614 return ret;
626} 615}
616
617module_init(brcmf_sdio_init);
618module_exit(brcmf_sdio_exit);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 6da519e7578f..ed60f4d69627 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -87,7 +87,7 @@
87#define TOE_TX_CSUM_OL 0x00000001 87#define TOE_TX_CSUM_OL 0x00000001
88#define TOE_RX_CSUM_OL 0x00000002 88#define TOE_RX_CSUM_OL 0x00000002
89 89
90#define BRCMF_BSS_INFO_VERSION 108 /* curr ver of brcmf_bss_info_le struct */ 90#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
91 91
92/* size of brcmf_scan_params not including variable length array */ 92/* size of brcmf_scan_params not including variable length array */
93#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64 93#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
@@ -571,8 +571,14 @@ struct brcmf_dcmd {
571 uint needed; /* bytes needed (optional) */ 571 uint needed; /* bytes needed (optional) */
572}; 572};
573 573
574struct brcmf_bus {
575 u8 type; /* bus type */
576 void *bus_priv; /* pointer to bus private structure */
577 enum brcmf_bus_state state;
578};
579
574/* Forward decls for struct brcmf_pub (see below) */ 580/* Forward decls for struct brcmf_pub (see below) */
575struct brcmf_bus; /* device bus info */ 581struct brcmf_sdio; /* device bus info */
576struct brcmf_proto; /* device communication protocol info */ 582struct brcmf_proto; /* device communication protocol info */
577struct brcmf_info; /* device driver info */ 583struct brcmf_info; /* device driver info */
578struct brcmf_cfg80211_dev; /* cfg80211 device info */ 584struct brcmf_cfg80211_dev; /* cfg80211 device info */
@@ -580,15 +586,16 @@ struct brcmf_cfg80211_dev; /* cfg80211 device info */
580/* Common structure for module and instance linkage */ 586/* Common structure for module and instance linkage */
581struct brcmf_pub { 587struct brcmf_pub {
582 /* Linkage ponters */ 588 /* Linkage ponters */
583 struct brcmf_bus *bus; 589 struct brcmf_sdio *bus;
590 struct brcmf_bus *bus_if;
584 struct brcmf_proto *prot; 591 struct brcmf_proto *prot;
585 struct brcmf_info *info; 592 struct brcmf_info *info;
586 struct brcmf_cfg80211_dev *config; 593 struct brcmf_cfg80211_dev *config;
594 struct device *dev; /* fullmac dongle device pointer */
587 595
588 /* Internal brcmf items */ 596 /* Internal brcmf items */
589 bool up; /* Driver up/down (to OS) */ 597 bool up; /* Driver up/down (to OS) */
590 bool txoff; /* Transmit flow-controlled */ 598 bool txoff; /* Transmit flow-controlled */
591 enum brcmf_bus_state busstate;
592 uint hdrlen; /* Total BRCMF header length (proto + bus) */ 599 uint hdrlen; /* Total BRCMF header length (proto + bus) */
593 uint maxctl; /* Max size rxctl request from proto to bus */ 600 uint maxctl; /* Max size rxctl request from proto to bus */
594 uint rxsz; /* Rx buffer size bus module should use */ 601 uint rxsz; /* Rx buffer size bus module should use */
@@ -656,7 +663,6 @@ struct brcmf_pub {
656 663
657 u8 country_code[BRCM_CNTRY_BUF_SZ]; 664 u8 country_code[BRCM_CNTRY_BUF_SZ];
658 char eventmask[BRCMF_EVENTING_MASK_LEN]; 665 char eventmask[BRCMF_EVENTING_MASK_LEN];
659
660}; 666};
661 667
662struct brcmf_if_event { 668struct brcmf_if_event {
@@ -681,8 +687,8 @@ extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
681 * Returned structure should have bus and prot pointers filled in. 687 * Returned structure should have bus and prot pointers filled in.
682 * bus_hdrlen specifies required headroom for bus module header. 688 * bus_hdrlen specifies required headroom for bus module header.
683 */ 689 */
684extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, 690extern struct brcmf_pub *brcmf_attach(struct brcmf_sdio *bus,
685 uint bus_hdrlen); 691 uint bus_hdrlen, struct device *dev);
686extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx); 692extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
687extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); 693extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
688 694
@@ -699,7 +705,16 @@ extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
699 705
700/* Receive frame for delivery to OS. Callee disposes of rxp. */ 706/* Receive frame for delivery to OS. Callee disposes of rxp. */
701extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, 707extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
702 struct sk_buff *rxp, int numpkt); 708 struct sk_buff_head *rxlist);
709static inline void brcmf_rx_packet(struct brcmf_pub *drvr, int ifidx,
710 struct sk_buff *pkt)
711{
712 struct sk_buff_head q;
713
714 skb_queue_head_init(&q);
715 skb_queue_tail(&q, pkt);
716 brcmf_rx_frame(drvr, ifidx, &q);
717}
703 718
704/* Return pointer to interface name */ 719/* Return pointer to interface name */
705extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx); 720extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
@@ -724,8 +739,6 @@ extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
724 void *pktdata, struct brcmf_event_msg *, 739 void *pktdata, struct brcmf_event_msg *,
725 void **data_ptr); 740 void **data_ptr);
726 741
727extern void brcmf_c_init(void);
728
729extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, 742extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx,
730 char *name, u8 *mac_addr); 743 char *name, u8 *mac_addr);
731extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx); 744extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index a249407c9a1b..1841f996110b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -27,31 +27,24 @@
27 * Exported from brcmf bus module (brcmf_usb, brcmf_sdio) 27 * Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
28 */ 28 */
29 29
30/* Indicate (dis)interest in finding dongles. */
31extern int brcmf_bus_register(void);
32extern void brcmf_bus_unregister(void);
33
34/* obtain linux device object providing bus function */
35extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus);
36
37/* Stop bus module: clear pending frames, disable data flow */ 30/* Stop bus module: clear pending frames, disable data flow */
38extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus); 31extern void brcmf_sdbrcm_bus_stop(struct device *dev);
39 32
40/* Initialize bus module: prepare for communication w/dongle */ 33/* Initialize bus module: prepare for communication w/dongle */
41extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr); 34extern int brcmf_sdbrcm_bus_init(struct device *dev);
42 35
43/* Send a data frame to the dongle. Callee disposes of txp. */ 36/* Send a data frame to the dongle. Callee disposes of txp. */
44extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp); 37extern int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *txp);
45 38
46/* Send/receive a control message to/from the dongle. 39/* Send/receive a control message to/from the dongle.
47 * Expects caller to enforce a single outstanding transaction. 40 * Expects caller to enforce a single outstanding transaction.
48 */ 41 */
49extern int 42extern int
50brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen); 43brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen);
51 44
52extern int 45extern int
53brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen); 46brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen);
54 47
55extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick); 48extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
56 49
57#endif /* _BRCMF_BUS_H_ */ 50#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index e34c5c3d1d55..ebd53aa7202b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -58,7 +58,7 @@ struct brcmf_proto_cdc_dcmd {
58 * Used on data packets to convey priority across USB. 58 * Used on data packets to convey priority across USB.
59 */ 59 */
60#define BDC_HEADER_LEN 4 60#define BDC_HEADER_LEN 4
61#define BDC_PROTO_VER 1 /* Protocol version */ 61#define BDC_PROTO_VER 2 /* Protocol version */
62#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */ 62#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
63#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */ 63#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
64#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */ 64#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
@@ -77,7 +77,7 @@ struct brcmf_proto_bdc_header {
77 u8 flags; 77 u8 flags;
78 u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */ 78 u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */
79 u8 flags2; 79 u8 flags2;
80 u8 rssi; 80 u8 data_offset;
81}; 81};
82 82
83 83
@@ -116,7 +116,7 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
116 len = CDC_MAX_MSG_SIZE; 116 len = CDC_MAX_MSG_SIZE;
117 117
118 /* Send request */ 118 /* Send request */
119 return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg, 119 return brcmf_sdbrcm_bus_txctl(drvr->dev, (unsigned char *)&prot->msg,
120 len); 120 len);
121} 121}
122 122
@@ -128,7 +128,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
128 brcmf_dbg(TRACE, "Enter\n"); 128 brcmf_dbg(TRACE, "Enter\n");
129 129
130 do { 130 do {
131 ret = brcmf_sdbrcm_bus_rxctl(drvr->bus, 131 ret = brcmf_sdbrcm_bus_rxctl(drvr->dev,
132 (unsigned char *)&prot->msg, 132 (unsigned char *)&prot->msg,
133 len + sizeof(struct brcmf_proto_cdc_dcmd)); 133 len + sizeof(struct brcmf_proto_cdc_dcmd));
134 if (ret < 0) 134 if (ret < 0)
@@ -280,7 +280,7 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
280 struct brcmf_proto *prot = drvr->prot; 280 struct brcmf_proto *prot = drvr->prot;
281 int ret = -1; 281 int ret = -1;
282 282
283 if (drvr->busstate == BRCMF_BUS_DOWN) { 283 if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
284 brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n"); 284 brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
285 return ret; 285 return ret;
286 } 286 }
@@ -372,7 +372,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
372 372
373 h->priority = (pktbuf->priority & BDC_PRIORITY_MASK); 373 h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
374 h->flags2 = 0; 374 h->flags2 = 0;
375 h->rssi = 0; 375 h->data_offset = 0;
376 BDC_SET_IF_IDX(h, ifidx); 376 BDC_SET_IF_IDX(h, ifidx);
377} 377}
378 378
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 40928e58b6a6..69f335aeb255 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -32,8 +32,6 @@
32#define PKTFILTER_BUF_SIZE 2048 32#define PKTFILTER_BUF_SIZE 2048
33#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */ 33#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
34 34
35int brcmf_msg_level;
36
37#define MSGTRACE_VERSION 1 35#define MSGTRACE_VERSION 1
38 36
39#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u) 37#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u)
@@ -85,19 +83,6 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
85 return len; 83 return len;
86} 84}
87 85
88void brcmf_c_init(void)
89{
90 /* Init global variables at run-time, not as part of the declaration.
91 * This is required to support init/de-init of the driver.
92 * Initialization
93 * of globals as part of the declaration results in non-deterministic
94 * behaviour since the value of the globals may be different on the
95 * first time that the driver is initialized vs subsequent
96 * initializations.
97 */
98 brcmf_msg_level = BRCMF_ERROR_VAL;
99}
100
101bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q, 86bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
102 struct sk_buff *pkt, int prec) 87 struct sk_buff *pkt, int prec)
103{ 88{
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 719fd9397eb6..58d92bca9ca2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -43,7 +43,6 @@
43#include "dhd_proto.h" 43#include "dhd_proto.h"
44#include "dhd_dbg.h" 44#include "dhd_dbg.h"
45#include "wl_cfg80211.h" 45#include "wl_cfg80211.h"
46#include "bcmchip.h"
47 46
48MODULE_AUTHOR("Broadcom Corporation"); 47MODULE_AUTHOR("Broadcom Corporation");
49MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver."); 48MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
@@ -77,6 +76,7 @@ struct brcmf_info {
77}; 76};
78 77
79/* Error bits */ 78/* Error bits */
79int brcmf_msg_level = BRCMF_ERROR_VAL;
80module_param(brcmf_msg_level, int, 0); 80module_param(brcmf_msg_level, int, 0);
81 81
82int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name) 82int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
@@ -292,7 +292,7 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
292 struct brcmf_info *drvr_priv = drvr->info; 292 struct brcmf_info *drvr_priv = drvr->info;
293 293
294 /* Reject if down */ 294 /* Reject if down */
295 if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN)) 295 if (!drvr->up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
296 return -ENODEV; 296 return -ENODEV;
297 297
298 /* Update multicast statistic */ 298 /* Update multicast statistic */
@@ -310,7 +310,7 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
310 brcmf_proto_hdrpush(drvr, ifidx, pktbuf); 310 brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
311 311
312 /* Use bus module to send data frame */ 312 /* Use bus module to send data frame */
313 return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf); 313 return brcmf_sdbrcm_bus_txdata(drvr->dev, pktbuf);
314} 314}
315 315
316static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) 316static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
@@ -322,9 +322,11 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
322 brcmf_dbg(TRACE, "Enter\n"); 322 brcmf_dbg(TRACE, "Enter\n");
323 323
324 /* Reject if down */ 324 /* Reject if down */
325 if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) { 325 if (!drvr_priv->pub.up ||
326 brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n", 326 (drvr_priv->pub.bus_if->state == BRCMF_BUS_DOWN)) {
327 drvr_priv->pub.up, drvr_priv->pub.busstate); 327 brcmf_dbg(ERROR, "xmit rejected pub.up=%d state=%d\n",
328 drvr_priv->pub.up,
329 drvr_priv->pub.bus_if->state);
328 netif_stop_queue(ndev); 330 netif_stop_queue(ndev);
329 return -ENODEV; 331 return -ENODEV;
330 } 332 }
@@ -397,26 +399,21 @@ static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx,
397 return bcmerror; 399 return bcmerror;
398} 400}
399 401
400void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb, 402void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
401 int numpkt) 403 struct sk_buff_head *skb_list)
402{ 404{
403 struct brcmf_info *drvr_priv = drvr->info; 405 struct brcmf_info *drvr_priv = drvr->info;
404 unsigned char *eth; 406 unsigned char *eth;
405 uint len; 407 uint len;
406 void *data; 408 void *data;
407 struct sk_buff *pnext, *save_pktbuf; 409 struct sk_buff *skb, *pnext;
408 int i;
409 struct brcmf_if *ifp; 410 struct brcmf_if *ifp;
410 struct brcmf_event_msg event; 411 struct brcmf_event_msg event;
411 412
412 brcmf_dbg(TRACE, "Enter\n"); 413 brcmf_dbg(TRACE, "Enter\n");
413 414
414 save_pktbuf = skb; 415 skb_queue_walk_safe(skb_list, skb, pnext) {
415 416 skb_unlink(skb, skb_list);
416 for (i = 0; skb && i < numpkt; i++, skb = pnext) {
417
418 pnext = skb->next;
419 skb->next = NULL;
420 417
421 /* Get the protocol, maintain skb around eth_type_trans() 418 /* Get the protocol, maintain skb around eth_type_trans()
422 * The main reason for this hack is for the limitation of 419 * The main reason for this hack is for the limitation of
@@ -437,6 +434,12 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
437 if (ifp == NULL) 434 if (ifp == NULL)
438 ifp = drvr_priv->iflist[0]; 435 ifp = drvr_priv->iflist[0];
439 436
437 if (!ifp || !ifp->ndev ||
438 ifp->ndev->reg_state != NETREG_REGISTERED) {
439 brcmu_pkt_buf_free_skb(skb);
440 continue;
441 }
442
440 skb->dev = ifp->ndev; 443 skb->dev = ifp->ndev;
441 skb->protocol = eth_type_trans(skb, skb->dev); 444 skb->protocol = eth_type_trans(skb, skb->dev);
442 445
@@ -605,9 +608,7 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
605 608
606 sprintf(info->driver, KBUILD_MODNAME); 609 sprintf(info->driver, KBUILD_MODNAME);
607 sprintf(info->version, "%lu", drvr_priv->pub.drv_version); 610 sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
608 sprintf(info->fw_version, "%s", BCM4329_FW_NAME); 611 sprintf(info->bus_info, "%s", dev_name(drvr_priv->pub.dev));
609 sprintf(info->bus_info, "%s",
610 dev_name(brcmf_bus_get_device(drvr_priv->pub.bus)));
611} 612}
612 613
613static struct ethtool_ops brcmf_ethtool_ops = { 614static struct ethtool_ops brcmf_ethtool_ops = {
@@ -761,7 +762,7 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
761 buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN); 762 buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
762 763
763 /* send to dongle (must be up, and wl) */ 764 /* send to dongle (must be up, and wl) */
764 if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) { 765 if ((drvr_priv->pub.bus_if->state != BRCMF_BUS_DATA)) {
765 brcmf_dbg(ERROR, "DONGLE_DOWN\n"); 766 brcmf_dbg(ERROR, "DONGLE_DOWN\n");
766 err = -EIO; 767 err = -EIO;
767 goto done; 768 goto done;
@@ -940,7 +941,8 @@ void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
940 } 941 }
941} 942}
942 943
943struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen) 944struct brcmf_pub *brcmf_attach(struct brcmf_sdio *bus, uint bus_hdrlen,
945 struct device *dev)
944{ 946{
945 struct brcmf_info *drvr_priv = NULL; 947 struct brcmf_info *drvr_priv = NULL;
946 948
@@ -959,6 +961,8 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
959 /* Link to bus module */ 961 /* Link to bus module */
960 drvr_priv->pub.bus = bus; 962 drvr_priv->pub.bus = bus;
961 drvr_priv->pub.hdrlen = bus_hdrlen; 963 drvr_priv->pub.hdrlen = bus_hdrlen;
964 drvr_priv->pub.bus_if = dev_get_drvdata(dev);
965 drvr_priv->pub.dev = dev;
962 966
963 /* Attach and link in the protocol */ 967 /* Attach and link in the protocol */
964 if (brcmf_proto_attach(&drvr_priv->pub) != 0) { 968 if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
@@ -988,14 +992,14 @@ int brcmf_bus_start(struct brcmf_pub *drvr)
988 brcmf_dbg(TRACE, "\n"); 992 brcmf_dbg(TRACE, "\n");
989 993
990 /* Bring up the bus */ 994 /* Bring up the bus */
991 ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub); 995 ret = brcmf_sdbrcm_bus_init(drvr_priv->pub.dev);
992 if (ret != 0) { 996 if (ret != 0) {
993 brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret); 997 brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
994 return ret; 998 return ret;
995 } 999 }
996 1000
997 /* If bus is not ready, can't come up */ 1001 /* If bus is not ready, can't come up */
998 if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) { 1002 if (drvr_priv->pub.bus_if->state != BRCMF_BUS_DATA) {
999 brcmf_dbg(ERROR, "failed bus is not ready\n"); 1003 brcmf_dbg(ERROR, "failed bus is not ready\n");
1000 return -ENODEV; 1004 return -ENODEV;
1001 } 1005 }
@@ -1077,10 +1081,7 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
1077 1081
1078 /* attach to cfg80211 for primary interface */ 1082 /* attach to cfg80211 for primary interface */
1079 if (!ifidx) { 1083 if (!ifidx) {
1080 drvr->config = 1084 drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
1081 brcmf_cfg80211_attach(ndev,
1082 brcmf_bus_get_device(drvr->bus),
1083 drvr);
1084 if (drvr->config == NULL) { 1085 if (drvr->config == NULL) {
1085 brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n"); 1086 brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
1086 goto fail; 1087 goto fail;
@@ -1114,7 +1115,7 @@ static void brcmf_bus_detach(struct brcmf_pub *drvr)
1114 brcmf_proto_stop(&drvr_priv->pub); 1115 brcmf_proto_stop(&drvr_priv->pub);
1115 1116
1116 /* Stop the bus module */ 1117 /* Stop the bus module */
1117 brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus); 1118 brcmf_sdbrcm_bus_stop(drvr_priv->pub.dev);
1118 } 1119 }
1119 } 1120 }
1120} 1121}
@@ -1148,34 +1149,6 @@ void brcmf_detach(struct brcmf_pub *drvr)
1148 } 1149 }
1149} 1150}
1150 1151
1151static void __exit brcmf_module_cleanup(void)
1152{
1153 brcmf_dbg(TRACE, "Enter\n");
1154
1155 brcmf_bus_unregister();
1156}
1157
1158static int __init brcmf_module_init(void)
1159{
1160 int error;
1161
1162 brcmf_dbg(TRACE, "Enter\n");
1163
1164 error = brcmf_bus_register();
1165
1166 if (error) {
1167 brcmf_dbg(ERROR, "brcmf_bus_register failed\n");
1168 goto failed;
1169 }
1170 return 0;
1171
1172failed:
1173 return -EINVAL;
1174}
1175
1176module_init(brcmf_module_init);
1177module_exit(brcmf_module_cleanup);
1178
1179int brcmf_os_proto_block(struct brcmf_pub *drvr) 1152int brcmf_os_proto_block(struct brcmf_pub *drvr)
1180{ 1153{
1181 struct brcmf_info *drvr_priv = drvr->info; 1154 struct brcmf_info *drvr_priv = drvr->info;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 22913af26db8..43ba0dd48354 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -91,7 +91,6 @@ struct rte_console {
91#include "dhd_bus.h" 91#include "dhd_bus.h"
92#include "dhd_proto.h" 92#include "dhd_proto.h"
93#include "dhd_dbg.h" 93#include "dhd_dbg.h"
94#include <bcmchip.h>
95 94
96#define TXQLEN 2048 /* bulk tx queue length */ 95#define TXQLEN 2048 /* bulk tx queue length */
97#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */ 96#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
@@ -310,6 +309,11 @@ struct rte_console {
310/* Flags for SDH calls */ 309/* Flags for SDH calls */
311#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED) 310#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
312 311
312#define BRCMFMAC_FW_NAME "brcm/brcmfmac.bin"
313#define BRCMFMAC_NV_NAME "brcm/brcmfmac.txt"
314MODULE_FIRMWARE(BRCMFMAC_FW_NAME);
315MODULE_FIRMWARE(BRCMFMAC_NV_NAME);
316
313/* 317/*
314 * Conversion of 802.1D priority to precedence level 318 * Conversion of 802.1D priority to precedence level
315 */ 319 */
@@ -445,7 +449,7 @@ struct sdpcm_shared_le {
445 449
446/* misc chip info needed by some of the routines */ 450/* misc chip info needed by some of the routines */
447/* Private data for SDIO bus interaction */ 451/* Private data for SDIO bus interaction */
448struct brcmf_bus { 452struct brcmf_sdio {
449 struct brcmf_pub *drvr; 453 struct brcmf_pub *drvr;
450 454
451 struct brcmf_sdio_dev *sdiodev; /* sdio device handler */ 455 struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
@@ -562,9 +566,7 @@ struct brcmf_bus {
562 566
563 struct semaphore sdsem; 567 struct semaphore sdsem;
564 568
565 const char *fw_name;
566 const struct firmware *firmware; 569 const struct firmware *firmware;
567 const char *nv_name;
568 u32 fw_ptr; 570 u32 fw_ptr;
569}; 571};
570 572
@@ -602,7 +604,7 @@ static void pkt_align(struct sk_buff *p, int len, int align)
602} 604}
603 605
604/* To check if there's window offered */ 606/* To check if there's window offered */
605static bool data_ok(struct brcmf_bus *bus) 607static bool data_ok(struct brcmf_sdio *bus)
606{ 608{
607 return (u8)(bus->tx_max - bus->tx_seq) != 0 && 609 return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
608 ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0; 610 ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
@@ -613,7 +615,7 @@ static bool data_ok(struct brcmf_bus *bus)
613 * adresses on the 32 bit backplane bus. 615 * adresses on the 32 bit backplane bus.
614 */ 616 */
615static void 617static void
616r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar) 618r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
617{ 619{
618 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); 620 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
619 *retryvar = 0; 621 *retryvar = 0;
@@ -633,7 +635,7 @@ r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
633} 635}
634 636
635static void 637static void
636w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar) 638w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar)
637{ 639{
638 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV); 640 u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
639 *retryvar = 0; 641 *retryvar = 0;
@@ -658,14 +660,14 @@ w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
658/* Packet free applicable unconditionally for sdio and sdspi. 660/* Packet free applicable unconditionally for sdio and sdspi.
659 * Conditional if bufpool was present for gspi bus. 661 * Conditional if bufpool was present for gspi bus.
660 */ 662 */
661static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt) 663static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
662{ 664{
663 if (bus->usebufpool) 665 if (bus->usebufpool)
664 brcmu_pkt_buf_free_skb(pkt); 666 brcmu_pkt_buf_free_skb(pkt);
665} 667}
666 668
667/* Turn backplane clock on or off */ 669/* Turn backplane clock on or off */
668static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok) 670static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
669{ 671{
670 int err; 672 int err;
671 u8 clkctl, clkreq, devctl; 673 u8 clkctl, clkreq, devctl;
@@ -786,7 +788,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
786} 788}
787 789
788/* Change idle/active SD state */ 790/* Change idle/active SD state */
789static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on) 791static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
790{ 792{
791 brcmf_dbg(TRACE, "Enter\n"); 793 brcmf_dbg(TRACE, "Enter\n");
792 794
@@ -799,7 +801,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
799} 801}
800 802
801/* Transition SD and backplane clock readiness */ 803/* Transition SD and backplane clock readiness */
802static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok) 804static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
803{ 805{
804#ifdef BCMDBG 806#ifdef BCMDBG
805 uint oldstate = bus->clkstate; 807 uint oldstate = bus->clkstate;
@@ -855,7 +857,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
855 return 0; 857 return 0;
856} 858}
857 859
858static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep) 860static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
859{ 861{
860 uint retries = 0; 862 uint retries = 0;
861 863
@@ -927,13 +929,13 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
927 return 0; 929 return 0;
928} 930}
929 931
930static void bus_wake(struct brcmf_bus *bus) 932static void bus_wake(struct brcmf_sdio *bus)
931{ 933{
932 if (bus->sleeping) 934 if (bus->sleeping)
933 brcmf_sdbrcm_bussleep(bus, false); 935 brcmf_sdbrcm_bussleep(bus, false);
934} 936}
935 937
936static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus) 938static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
937{ 939{
938 u32 intstatus = 0; 940 u32 intstatus = 0;
939 u32 hmb_data; 941 u32 hmb_data;
@@ -1009,7 +1011,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
1009 return intstatus; 1011 return intstatus;
1010} 1012}
1011 1013
1012static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx) 1014static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
1013{ 1015{
1014 uint retries = 0; 1016 uint retries = 0;
1015 u16 lastrbc; 1017 u16 lastrbc;
@@ -1066,11 +1068,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
1066 1068
1067 /* If we can't reach the device, signal failure */ 1069 /* If we can't reach the device, signal failure */
1068 if (err || brcmf_sdcard_regfail(bus->sdiodev)) 1070 if (err || brcmf_sdcard_regfail(bus->sdiodev))
1069 bus->drvr->busstate = BRCMF_BUS_DOWN; 1071 bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
1070} 1072}
1071 1073
1072/* copy a buffer into a pkt buffer chain */ 1074/* copy a buffer into a pkt buffer chain */
1073static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_bus *bus, uint len) 1075static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
1074{ 1076{
1075 uint n, ret = 0; 1077 uint n, ret = 0;
1076 struct sk_buff *p; 1078 struct sk_buff *p;
@@ -1093,7 +1095,7 @@ static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_bus *bus, uint len)
1093} 1095}
1094 1096
1095/* return total length of buffer chain */ 1097/* return total length of buffer chain */
1096static uint brcmf_sdbrcm_glom_len(struct brcmf_bus *bus) 1098static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
1097{ 1099{
1098 struct sk_buff *p; 1100 struct sk_buff *p;
1099 uint total; 1101 uint total;
@@ -1104,7 +1106,7 @@ static uint brcmf_sdbrcm_glom_len(struct brcmf_bus *bus)
1104 return total; 1106 return total;
1105} 1107}
1106 1108
1107static void brcmf_sdbrcm_free_glom(struct brcmf_bus *bus) 1109static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
1108{ 1110{
1109 struct sk_buff *cur, *next; 1111 struct sk_buff *cur, *next;
1110 1112
@@ -1114,13 +1116,13 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_bus *bus)
1114 } 1116 }
1115} 1117}
1116 1118
1117static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq) 1119static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1118{ 1120{
1119 u16 dlen, totlen; 1121 u16 dlen, totlen;
1120 u8 *dptr, num = 0; 1122 u8 *dptr, num = 0;
1121 1123
1122 u16 sublen, check; 1124 u16 sublen, check;
1123 struct sk_buff *pfirst, *plast, *pnext, *save_pfirst; 1125 struct sk_buff *pfirst, *pnext;
1124 1126
1125 int errcode; 1127 int errcode;
1126 u8 chan, seq, doff, sfdoff; 1128 u8 chan, seq, doff, sfdoff;
@@ -1137,7 +1139,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
1137 1139
1138 /* If there's a descriptor, generate the packet chain */ 1140 /* If there's a descriptor, generate the packet chain */
1139 if (bus->glomd) { 1141 if (bus->glomd) {
1140 pfirst = plast = pnext = NULL; 1142 pfirst = pnext = NULL;
1141 dlen = (u16) (bus->glomd->len); 1143 dlen = (u16) (bus->glomd->len);
1142 dptr = bus->glomd->data; 1144 dptr = bus->glomd->data;
1143 if (!dlen || (dlen & 1)) { 1145 if (!dlen || (dlen & 1)) {
@@ -1228,17 +1230,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
1228 * packet and and copy into the chain. 1230 * packet and and copy into the chain.
1229 */ 1231 */
1230 if (usechain) { 1232 if (usechain) {
1231 errcode = brcmf_sdcard_recv_buf(bus->sdiodev, 1233 errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
1232 bus->sdiodev->sbwad, 1234 bus->sdiodev->sbwad,
1233 SDIO_FUNC_2, 1235 SDIO_FUNC_2, F2SYNC, &bus->glom);
1234 F2SYNC, (u8 *) pfirst->data, dlen,
1235 pfirst);
1236 } else if (bus->dataptr) { 1236 } else if (bus->dataptr) {
1237 errcode = brcmf_sdcard_recv_buf(bus->sdiodev, 1237 errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
1238 bus->sdiodev->sbwad, 1238 bus->sdiodev->sbwad,
1239 SDIO_FUNC_2, 1239 SDIO_FUNC_2, F2SYNC,
1240 F2SYNC, bus->dataptr, dlen, 1240 bus->dataptr, dlen);
1241 NULL);
1242 sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen); 1241 sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
1243 if (sublen != dlen) { 1242 if (sublen != dlen) {
1244 brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n", 1243 brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
@@ -1338,10 +1337,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
1338 /* Remove superframe header, remember offset */ 1337 /* Remove superframe header, remember offset */
1339 skb_pull(pfirst, doff); 1338 skb_pull(pfirst, doff);
1340 sfdoff = doff; 1339 sfdoff = doff;
1340 num = 0;
1341 1341
1342 /* Validate all the subframe headers */ 1342 /* Validate all the subframe headers */
1343 for (num = 0, pnext = pfirst; pnext && !errcode; 1343 skb_queue_walk(&bus->glom, pnext) {
1344 num++, pnext = pnext->next) { 1344 /* leave when invalid subframe is found */
1345 if (errcode)
1346 break;
1347
1345 dptr = (u8 *) (pnext->data); 1348 dptr = (u8 *) (pnext->data);
1346 dlen = (u16) (pnext->len); 1349 dlen = (u16) (pnext->len);
1347 sublen = get_unaligned_le16(dptr); 1350 sublen = get_unaligned_le16(dptr);
@@ -1374,6 +1377,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
1374 num, doff, sublen, SDPCM_HDRLEN); 1377 num, doff, sublen, SDPCM_HDRLEN);
1375 errcode = -1; 1378 errcode = -1;
1376 } 1379 }
1380 /* increase the subframe count */
1381 num++;
1377 } 1382 }
1378 1383
1379 if (errcode) { 1384 if (errcode) {
@@ -1394,13 +1399,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
1394 } 1399 }
1395 1400
1396 /* Basic SD framing looks ok - process each packet (header) */ 1401 /* Basic SD framing looks ok - process each packet (header) */
1397 save_pfirst = pfirst;
1398 plast = NULL;
1399
1400 for (num = 0; pfirst; rxseq++, pfirst = pnext) {
1401 pnext = pfirst->next;
1402 pfirst->next = NULL;
1403 1402
1403 skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1404 dptr = (u8 *) (pfirst->data); 1404 dptr = (u8 *) (pfirst->data);
1405 sublen = get_unaligned_le16(dptr); 1405 sublen = get_unaligned_le16(dptr);
1406 chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); 1406 chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -1420,6 +1420,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
1420 bus->rx_badseq++; 1420 bus->rx_badseq++;
1421 rxseq = seq; 1421 rxseq = seq;
1422 } 1422 }
1423 rxseq++;
1424
1423#ifdef BCMDBG 1425#ifdef BCMDBG
1424 if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) { 1426 if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
1425 printk(KERN_DEBUG "Rx Subframe Data:\n"); 1427 printk(KERN_DEBUG "Rx Subframe Data:\n");
@@ -1432,36 +1434,22 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
1432 skb_pull(pfirst, doff); 1434 skb_pull(pfirst, doff);
1433 1435
1434 if (pfirst->len == 0) { 1436 if (pfirst->len == 0) {
1437 skb_unlink(pfirst, &bus->glom);
1435 brcmu_pkt_buf_free_skb(pfirst); 1438 brcmu_pkt_buf_free_skb(pfirst);
1436 if (plast)
1437 plast->next = pnext;
1438 else
1439 save_pfirst = pnext;
1440
1441 continue; 1439 continue;
1442 } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, 1440 } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx,
1443 pfirst) != 0) { 1441 pfirst) != 0) {
1444 brcmf_dbg(ERROR, "rx protocol error\n"); 1442 brcmf_dbg(ERROR, "rx protocol error\n");
1445 bus->drvr->rx_errors++; 1443 bus->drvr->rx_errors++;
1444 skb_unlink(pfirst, &bus->glom);
1446 brcmu_pkt_buf_free_skb(pfirst); 1445 brcmu_pkt_buf_free_skb(pfirst);
1447 if (plast)
1448 plast->next = pnext;
1449 else
1450 save_pfirst = pnext;
1451
1452 continue; 1446 continue;
1453 } 1447 }
1454 1448
1455 /* this packet will go up, link back into
1456 chain and count it */
1457 pfirst->next = pnext;
1458 plast = pfirst;
1459 num++;
1460
1461#ifdef BCMDBG 1449#ifdef BCMDBG
1462 if (BRCMF_GLOM_ON()) { 1450 if (BRCMF_GLOM_ON()) {
1463 brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n", 1451 brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1464 num, pfirst, pfirst->data, 1452 bus->glom.qlen, pfirst, pfirst->data,
1465 pfirst->len, pfirst->next, 1453 pfirst->len, pfirst->next,
1466 pfirst->prev); 1454 pfirst->prev);
1467 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, 1455 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
@@ -1470,19 +1458,20 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
1470 } 1458 }
1471#endif /* BCMDBG */ 1459#endif /* BCMDBG */
1472 } 1460 }
1473 if (num) { 1461 /* sent any remaining packets up */
1462 if (bus->glom.qlen) {
1474 up(&bus->sdsem); 1463 up(&bus->sdsem);
1475 brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num); 1464 brcmf_rx_frame(bus->drvr, ifidx, &bus->glom);
1476 down(&bus->sdsem); 1465 down(&bus->sdsem);
1477 } 1466 }
1478 1467
1479 bus->rxglomframes++; 1468 bus->rxglomframes++;
1480 bus->rxglompkts += num; 1469 bus->rxglompkts += bus->glom.qlen;
1481 } 1470 }
1482 return num; 1471 return num;
1483} 1472}
1484 1473
1485static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition, 1474static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
1486 bool *pending) 1475 bool *pending)
1487{ 1476{
1488 DECLARE_WAITQUEUE(wait, current); 1477 DECLARE_WAITQUEUE(wait, current);
@@ -1504,7 +1493,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
1504 return timeout; 1493 return timeout;
1505} 1494}
1506 1495
1507static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus) 1496static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
1508{ 1497{
1509 if (waitqueue_active(&bus->dcmd_resp_wait)) 1498 if (waitqueue_active(&bus->dcmd_resp_wait))
1510 wake_up_interruptible(&bus->dcmd_resp_wait); 1499 wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1512,7 +1501,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
1512 return 0; 1501 return 0;
1513} 1502}
1514static void 1503static void
1515brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff) 1504brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1516{ 1505{
1517 uint rdlen, pad; 1506 uint rdlen, pad;
1518 1507
@@ -1570,8 +1559,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
1570 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, 1559 sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
1571 bus->sdiodev->sbwad, 1560 bus->sdiodev->sbwad,
1572 SDIO_FUNC_2, 1561 SDIO_FUNC_2,
1573 F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen, 1562 F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
1574 NULL);
1575 bus->f2rxdata++; 1563 bus->f2rxdata++;
1576 1564
1577 /* Control frame failures need retransmission */ 1565 /* Control frame failures need retransmission */
@@ -1602,7 +1590,7 @@ done:
1602} 1590}
1603 1591
1604/* Pad read to blocksize for efficiency */ 1592/* Pad read to blocksize for efficiency */
1605static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen) 1593static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
1606{ 1594{
1607 if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) { 1595 if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
1608 *pad = bus->blocksize - (*rdlen % bus->blocksize); 1596 *pad = bus->blocksize - (*rdlen % bus->blocksize);
@@ -1615,7 +1603,7 @@ static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
1615} 1603}
1616 1604
1617static void 1605static void
1618brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen, 1606brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
1619 struct sk_buff **pkt, u8 **rxbuf) 1607 struct sk_buff **pkt, u8 **rxbuf)
1620{ 1608{
1621 int sdret; /* Return code from calls */ 1609 int sdret; /* Return code from calls */
@@ -1627,9 +1615,8 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
1627 pkt_align(*pkt, rdlen, BRCMF_SDALIGN); 1615 pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
1628 *rxbuf = (u8 *) ((*pkt)->data); 1616 *rxbuf = (u8 *) ((*pkt)->data);
1629 /* Read the entire frame */ 1617 /* Read the entire frame */
1630 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, 1618 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1631 SDIO_FUNC_2, F2SYNC, 1619 SDIO_FUNC_2, F2SYNC, *pkt);
1632 *rxbuf, rdlen, *pkt);
1633 bus->f2rxdata++; 1620 bus->f2rxdata++;
1634 1621
1635 if (sdret < 0) { 1622 if (sdret < 0) {
@@ -1648,7 +1635,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
1648 1635
1649/* Checks the header */ 1636/* Checks the header */
1650static int 1637static int
1651brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf, 1638brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
1652 u8 rxseq, u16 nextlen, u16 *len) 1639 u8 rxseq, u16 nextlen, u16 *len)
1653{ 1640{
1654 u16 check; 1641 u16 check;
@@ -1704,7 +1691,7 @@ fail:
1704 1691
1705/* Return true if there may be more frames to read */ 1692/* Return true if there may be more frames to read */
1706static uint 1693static uint
1707brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished) 1694brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
1708{ 1695{
1709 u16 len, check; /* Extracted hardware header fields */ 1696 u16 len, check; /* Extracted hardware header fields */
1710 u8 chan, seq, doff; /* Extracted software header fields */ 1697 u8 chan, seq, doff; /* Extracted software header fields */
@@ -1727,7 +1714,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
1727 *finished = false; 1714 *finished = false;
1728 1715
1729 for (rxseq = bus->rx_seq, rxleft = maxframes; 1716 for (rxseq = bus->rx_seq, rxleft = maxframes;
1730 !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN; 1717 !bus->rxskip && rxleft &&
1718 bus->drvr->bus_if->state != BRCMF_BUS_DOWN;
1731 rxseq++, rxleft--) { 1719 rxseq++, rxleft--) {
1732 1720
1733 /* Handle glomming separately */ 1721 /* Handle glomming separately */
@@ -1857,7 +1845,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
1857 /* Read frame header (hardware and software) */ 1845 /* Read frame header (hardware and software) */
1858 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, 1846 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
1859 SDIO_FUNC_2, F2SYNC, bus->rxhdr, 1847 SDIO_FUNC_2, F2SYNC, bus->rxhdr,
1860 BRCMF_FIRSTREAD, NULL); 1848 BRCMF_FIRSTREAD);
1861 bus->f2rxhdrs++; 1849 bus->f2rxhdrs++;
1862 1850
1863 if (sdret < 0) { 1851 if (sdret < 0) {
@@ -2006,9 +1994,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
2006 pkt_align(pkt, rdlen, BRCMF_SDALIGN); 1994 pkt_align(pkt, rdlen, BRCMF_SDALIGN);
2007 1995
2008 /* Read the remaining frame data */ 1996 /* Read the remaining frame data */
2009 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, 1997 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
2010 SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)), 1998 SDIO_FUNC_2, F2SYNC, pkt);
2011 rdlen, pkt);
2012 bus->f2rxdata++; 1999 bus->f2rxdata++;
2013 2000
2014 if (sdret < 0) { 2001 if (sdret < 0) {
@@ -2075,7 +2062,7 @@ deliver:
2075 2062
2076 /* Unlock during rx call */ 2063 /* Unlock during rx call */
2077 up(&bus->sdsem); 2064 up(&bus->sdsem);
2078 brcmf_rx_frame(bus->drvr, ifidx, pkt, 1); 2065 brcmf_rx_packet(bus->drvr, ifidx, pkt);
2079 down(&bus->sdsem); 2066 down(&bus->sdsem);
2080 } 2067 }
2081 rxcount = maxframes - rxleft; 2068 rxcount = maxframes - rxleft;
@@ -2095,16 +2082,8 @@ deliver:
2095 return rxcount; 2082 return rxcount;
2096} 2083}
2097 2084
2098static int
2099brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
2100 u8 *buf, uint nbytes, struct sk_buff *pkt)
2101{
2102 return brcmf_sdcard_send_buf
2103 (bus->sdiodev, addr, fn, flags, buf, nbytes, pkt);
2104}
2105
2106static void 2085static void
2107brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar) 2086brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
2108{ 2087{
2109 up(&bus->sdsem); 2088 up(&bus->sdsem);
2110 wait_event_interruptible_timeout(bus->ctrl_wait, 2089 wait_event_interruptible_timeout(bus->ctrl_wait,
@@ -2114,7 +2093,7 @@ brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
2114} 2093}
2115 2094
2116static void 2095static void
2117brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus) 2096brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
2118{ 2097{
2119 if (waitqueue_active(&bus->ctrl_wait)) 2098 if (waitqueue_active(&bus->ctrl_wait))
2120 wake_up_interruptible(&bus->ctrl_wait); 2099 wake_up_interruptible(&bus->ctrl_wait);
@@ -2123,7 +2102,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
2123 2102
2124/* Writes a HW/SW header into the packet and sends it. */ 2103/* Writes a HW/SW header into the packet and sends it. */
2125/* Assumes: (a) header space already there, (b) caller holds lock */ 2104/* Assumes: (a) header space already there, (b) caller holds lock */
2126static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt, 2105static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
2127 uint chan, bool free_pkt) 2106 uint chan, bool free_pkt)
2128{ 2107{
2129 int ret; 2108 int ret;
@@ -2212,9 +2191,8 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
2212 if (len & (ALIGNMENT - 1)) 2191 if (len & (ALIGNMENT - 1))
2213 len = roundup(len, ALIGNMENT); 2192 len = roundup(len, ALIGNMENT);
2214 2193
2215 ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad, 2194 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
2216 SDIO_FUNC_2, F2SYNC, frame, 2195 SDIO_FUNC_2, F2SYNC, pkt);
2217 len, pkt);
2218 bus->f2txdata++; 2196 bus->f2txdata++;
2219 2197
2220 if (ret < 0) { 2198 if (ret < 0) {
@@ -2261,7 +2239,7 @@ done:
2261 return ret; 2239 return ret;
2262} 2240}
2263 2241
2264static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes) 2242static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
2265{ 2243{
2266 struct sk_buff *pkt; 2244 struct sk_buff *pkt;
2267 u32 intstatus = 0; 2245 u32 intstatus = 0;
@@ -2309,14 +2287,14 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
2309 } 2287 }
2310 2288
2311 /* Deflow-control stack if needed */ 2289 /* Deflow-control stack if needed */
2312 if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) && 2290 if (drvr->up && (drvr->bus_if->state == BRCMF_BUS_DATA) &&
2313 drvr->txoff && (pktq_len(&bus->txq) < TXLOW)) 2291 drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
2314 brcmf_txflowcontrol(drvr, 0, OFF); 2292 brcmf_txflowcontrol(drvr, 0, OFF);
2315 2293
2316 return cnt; 2294 return cnt;
2317} 2295}
2318 2296
2319static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus) 2297static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2320{ 2298{
2321 u32 intstatus, newstatus = 0; 2299 u32 intstatus, newstatus = 0;
2322 uint retries = 0; 2300 uint retries = 0;
@@ -2344,7 +2322,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
2344 SBSDIO_DEVICE_CTL, &err); 2322 SBSDIO_DEVICE_CTL, &err);
2345 if (err) { 2323 if (err) {
2346 brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err); 2324 brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
2347 bus->drvr->busstate = BRCMF_BUS_DOWN; 2325 bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
2348 } 2326 }
2349#endif /* BCMDBG */ 2327#endif /* BCMDBG */
2350 2328
@@ -2354,7 +2332,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
2354 if (err) { 2332 if (err) {
2355 brcmf_dbg(ERROR, "error reading CSR: %d\n", 2333 brcmf_dbg(ERROR, "error reading CSR: %d\n",
2356 err); 2334 err);
2357 bus->drvr->busstate = BRCMF_BUS_DOWN; 2335 bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
2358 } 2336 }
2359 2337
2360 brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", 2338 brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
@@ -2367,7 +2345,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
2367 if (err) { 2345 if (err) {
2368 brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", 2346 brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
2369 err); 2347 err);
2370 bus->drvr->busstate = BRCMF_BUS_DOWN; 2348 bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
2371 } 2349 }
2372 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; 2350 devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
2373 brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1, 2351 brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
@@ -2375,7 +2353,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
2375 if (err) { 2353 if (err) {
2376 brcmf_dbg(ERROR, "error writing DEVCTL: %d\n", 2354 brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
2377 err); 2355 err);
2378 bus->drvr->busstate = BRCMF_BUS_DOWN; 2356 bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
2379 } 2357 }
2380 bus->clkstate = CLK_AVAIL; 2358 bus->clkstate = CLK_AVAIL;
2381 } else { 2359 } else {
@@ -2477,9 +2455,9 @@ clkwait:
2477 (bus->clkstate == CLK_AVAIL)) { 2455 (bus->clkstate == CLK_AVAIL)) {
2478 int ret, i; 2456 int ret, i;
2479 2457
2480 ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad, 2458 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2481 SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf, 2459 SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
2482 (u32) bus->ctrl_frame_len, NULL); 2460 (u32) bus->ctrl_frame_len);
2483 2461
2484 if (ret < 0) { 2462 if (ret < 0) {
2485 /* On failure, abort the command and 2463 /* On failure, abort the command and
@@ -2531,11 +2509,11 @@ clkwait:
2531 else await next interrupt */ 2509 else await next interrupt */
2532 /* On failed register access, all bets are off: 2510 /* On failed register access, all bets are off:
2533 no resched or interrupts */ 2511 no resched or interrupts */
2534 if ((bus->drvr->busstate == BRCMF_BUS_DOWN) || 2512 if ((bus->drvr->bus_if->state == BRCMF_BUS_DOWN) ||
2535 brcmf_sdcard_regfail(bus->sdiodev)) { 2513 brcmf_sdcard_regfail(bus->sdiodev)) {
2536 brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n", 2514 brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
2537 brcmf_sdcard_regfail(bus->sdiodev)); 2515 brcmf_sdcard_regfail(bus->sdiodev));
2538 bus->drvr->busstate = BRCMF_BUS_DOWN; 2516 bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
2539 bus->intstatus = 0; 2517 bus->intstatus = 0;
2540 } else if (bus->clkstate == CLK_PENDING) { 2518 } else if (bus->clkstate == CLK_PENDING) {
2541 brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n"); 2519 brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
@@ -2562,7 +2540,7 @@ clkwait:
2562 2540
2563static int brcmf_sdbrcm_dpc_thread(void *data) 2541static int brcmf_sdbrcm_dpc_thread(void *data)
2564{ 2542{
2565 struct brcmf_bus *bus = (struct brcmf_bus *) data; 2543 struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
2566 2544
2567 allow_signal(SIGTERM); 2545 allow_signal(SIGTERM);
2568 /* Run until signal received */ 2546 /* Run until signal received */
@@ -2572,12 +2550,12 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
2572 if (!wait_for_completion_interruptible(&bus->dpc_wait)) { 2550 if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
2573 /* Call bus dpc unless it indicated down 2551 /* Call bus dpc unless it indicated down
2574 (then clean stop) */ 2552 (then clean stop) */
2575 if (bus->drvr->busstate != BRCMF_BUS_DOWN) { 2553 if (bus->drvr->bus_if->state != BRCMF_BUS_DOWN) {
2576 if (brcmf_sdbrcm_dpc(bus)) 2554 if (brcmf_sdbrcm_dpc(bus))
2577 complete(&bus->dpc_wait); 2555 complete(&bus->dpc_wait);
2578 } else { 2556 } else {
2579 /* after stopping the bus, exit thread */ 2557 /* after stopping the bus, exit thread */
2580 brcmf_sdbrcm_bus_stop(bus); 2558 brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
2581 bus->dpc_tsk = NULL; 2559 bus->dpc_tsk = NULL;
2582 break; 2560 break;
2583 } 2561 }
@@ -2587,10 +2565,13 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
2587 return 0; 2565 return 0;
2588} 2566}
2589 2567
2590int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt) 2568int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2591{ 2569{
2592 int ret = -EBADE; 2570 int ret = -EBADE;
2593 uint datalen, prec; 2571 uint datalen, prec;
2572 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2573 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
2574 struct brcmf_sdio *bus = sdiodev->bus;
2594 2575
2595 brcmf_dbg(TRACE, "Enter\n"); 2576 brcmf_dbg(TRACE, "Enter\n");
2596 2577
@@ -2638,7 +2619,7 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
2638} 2619}
2639 2620
2640static int 2621static int
2641brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data, 2622brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
2642 uint size) 2623 uint size)
2643{ 2624{
2644 int bcmerror = 0; 2625 int bcmerror = 0;
@@ -2699,7 +2680,7 @@ xfer_done:
2699#ifdef BCMDBG 2680#ifdef BCMDBG
2700#define CONSOLE_LINE_MAX 192 2681#define CONSOLE_LINE_MAX 192
2701 2682
2702static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus) 2683static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
2703{ 2684{
2704 struct brcmf_console *c = &bus->console; 2685 struct brcmf_console *c = &bus->console;
2705 u8 line[CONSOLE_LINE_MAX], ch; 2686 u8 line[CONSOLE_LINE_MAX], ch;
@@ -2776,14 +2757,14 @@ break2:
2776} 2757}
2777#endif /* BCMDBG */ 2758#endif /* BCMDBG */
2778 2759
2779static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len) 2760static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
2780{ 2761{
2781 int i; 2762 int i;
2782 int ret; 2763 int ret;
2783 2764
2784 bus->ctrl_frame_stat = false; 2765 bus->ctrl_frame_stat = false;
2785 ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad, 2766 ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2786 SDIO_FUNC_2, F2SYNC, frame, len, NULL); 2767 SDIO_FUNC_2, F2SYNC, frame, len);
2787 2768
2788 if (ret < 0) { 2769 if (ret < 0) {
2789 /* On failure, abort the command and terminate the frame */ 2770 /* On failure, abort the command and terminate the frame */
@@ -2819,7 +2800,7 @@ static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
2819} 2800}
2820 2801
2821int 2802int
2822brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen) 2803brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2823{ 2804{
2824 u8 *frame; 2805 u8 *frame;
2825 u16 len; 2806 u16 len;
@@ -2827,6 +2808,9 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
2827 uint retries = 0; 2808 uint retries = 0;
2828 u8 doff = 0; 2809 u8 doff = 0;
2829 int ret = -1; 2810 int ret = -1;
2811 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2812 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
2813 struct brcmf_sdio *bus = sdiodev->bus;
2830 2814
2831 brcmf_dbg(TRACE, "Enter\n"); 2815 brcmf_dbg(TRACE, "Enter\n");
2832 2816
@@ -2934,11 +2918,14 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
2934} 2918}
2935 2919
2936int 2920int
2937brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen) 2921brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
2938{ 2922{
2939 int timeleft; 2923 int timeleft;
2940 uint rxlen = 0; 2924 uint rxlen = 0;
2941 bool pending; 2925 bool pending;
2926 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
2927 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
2928 struct brcmf_sdio *bus = sdiodev->bus;
2942 2929
2943 brcmf_dbg(TRACE, "Enter\n"); 2930 brcmf_dbg(TRACE, "Enter\n");
2944 2931
@@ -2971,7 +2958,7 @@ brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
2971 return rxlen ? (int)rxlen : -ETIMEDOUT; 2958 return rxlen ? (int)rxlen : -ETIMEDOUT;
2972} 2959}
2973 2960
2974static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len) 2961static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
2975{ 2962{
2976 int bcmerror = 0; 2963 int bcmerror = 0;
2977 2964
@@ -3004,7 +2991,7 @@ err:
3004 return bcmerror; 2991 return bcmerror;
3005} 2992}
3006 2993
3007static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus) 2994static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
3008{ 2995{
3009 int bcmerror = 0; 2996 int bcmerror = 0;
3010 u32 varsize; 2997 u32 varsize;
@@ -3091,7 +3078,7 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
3091 return bcmerror; 3078 return bcmerror;
3092} 3079}
3093 3080
3094static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter) 3081static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
3095{ 3082{
3096 uint retries; 3083 uint retries;
3097 int bcmerror = 0; 3084 int bcmerror = 0;
@@ -3134,13 +3121,13 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
3134 /* Allow HT Clock now that the ARM is running. */ 3121 /* Allow HT Clock now that the ARM is running. */
3135 bus->alp_only = false; 3122 bus->alp_only = false;
3136 3123
3137 bus->drvr->busstate = BRCMF_BUS_LOAD; 3124 bus->drvr->bus_if->state = BRCMF_BUS_LOAD;
3138 } 3125 }
3139fail: 3126fail:
3140 return bcmerror; 3127 return bcmerror;
3141} 3128}
3142 3129
3143static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus) 3130static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
3144{ 3131{
3145 if (bus->firmware->size < bus->fw_ptr + len) 3132 if (bus->firmware->size < bus->fw_ptr + len)
3146 len = bus->firmware->size - bus->fw_ptr; 3133 len = bus->firmware->size - bus->fw_ptr;
@@ -3150,10 +3137,7 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
3150 return len; 3137 return len;
3151} 3138}
3152 3139
3153MODULE_FIRMWARE(BCM4329_FW_NAME); 3140static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
3154MODULE_FIRMWARE(BCM4329_NV_NAME);
3155
3156static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
3157{ 3141{
3158 int offset = 0; 3142 int offset = 0;
3159 uint len; 3143 uint len;
@@ -3162,8 +3146,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
3162 3146
3163 brcmf_dbg(INFO, "Enter\n"); 3147 brcmf_dbg(INFO, "Enter\n");
3164 3148
3165 bus->fw_name = BCM4329_FW_NAME; 3149 ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME,
3166 ret = request_firmware(&bus->firmware, bus->fw_name,
3167 &bus->sdiodev->func[2]->dev); 3150 &bus->sdiodev->func[2]->dev);
3168 if (ret) { 3151 if (ret) {
3169 brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret); 3152 brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
@@ -3253,15 +3236,14 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
3253 return buf_len; 3236 return buf_len;
3254} 3237}
3255 3238
3256static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus) 3239static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
3257{ 3240{
3258 uint len; 3241 uint len;
3259 char *memblock = NULL; 3242 char *memblock = NULL;
3260 char *bufp; 3243 char *bufp;
3261 int ret; 3244 int ret;
3262 3245
3263 bus->nv_name = BCM4329_NV_NAME; 3246 ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME,
3264 ret = request_firmware(&bus->firmware, bus->nv_name,
3265 &bus->sdiodev->func[2]->dev); 3247 &bus->sdiodev->func[2]->dev);
3266 if (ret) { 3248 if (ret) {
3267 brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret); 3249 brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
@@ -3301,7 +3283,7 @@ err:
3301 return ret; 3283 return ret;
3302} 3284}
3303 3285
3304static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus) 3286static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3305{ 3287{
3306 int bcmerror = -1; 3288 int bcmerror = -1;
3307 3289
@@ -3334,7 +3316,7 @@ err:
3334} 3316}
3335 3317
3336static bool 3318static bool
3337brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus) 3319brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3338{ 3320{
3339 bool ret; 3321 bool ret;
3340 3322
@@ -3348,12 +3330,15 @@ brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
3348 return ret; 3330 return ret;
3349} 3331}
3350 3332
3351void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus) 3333void brcmf_sdbrcm_bus_stop(struct device *dev)
3352{ 3334{
3353 u32 local_hostintmask; 3335 u32 local_hostintmask;
3354 u8 saveclk; 3336 u8 saveclk;
3355 uint retries; 3337 uint retries;
3356 int err; 3338 int err;
3339 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3340 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
3341 struct brcmf_sdio *bus = sdiodev->bus;
3357 3342
3358 brcmf_dbg(TRACE, "Enter\n"); 3343 brcmf_dbg(TRACE, "Enter\n");
3359 3344
@@ -3382,7 +3367,7 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
3382 bus->hostintmask = 0; 3367 bus->hostintmask = 0;
3383 3368
3384 /* Change our idea of bus state */ 3369 /* Change our idea of bus state */
3385 bus->drvr->busstate = BRCMF_BUS_DOWN; 3370 bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
3386 3371
3387 /* Force clocks on backplane to be sure F2 interrupt propagates */ 3372 /* Force clocks on backplane to be sure F2 interrupt propagates */
3388 saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1, 3373 saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
@@ -3426,9 +3411,11 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
3426 up(&bus->sdsem); 3411 up(&bus->sdsem);
3427} 3412}
3428 3413
3429int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr) 3414int brcmf_sdbrcm_bus_init(struct device *dev)
3430{ 3415{
3431 struct brcmf_bus *bus = drvr->bus; 3416 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3417 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
3418 struct brcmf_sdio *bus = sdiodev->bus;
3432 unsigned long timeout; 3419 unsigned long timeout;
3433 uint retries = 0; 3420 uint retries = 0;
3434 u8 ready, enable; 3421 u8 ready, enable;
@@ -3438,7 +3425,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
3438 brcmf_dbg(TRACE, "Enter\n"); 3425 brcmf_dbg(TRACE, "Enter\n");
3439 3426
3440 /* try to download image and nvram to the dongle */ 3427 /* try to download image and nvram to the dongle */
3441 if (drvr->busstate == BRCMF_BUS_DOWN) { 3428 if (bus_if->state == BRCMF_BUS_DOWN) {
3442 if (!(brcmf_sdbrcm_download_firmware(bus))) 3429 if (!(brcmf_sdbrcm_download_firmware(bus)))
3443 return -1; 3430 return -1;
3444 } 3431 }
@@ -3504,7 +3491,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
3504 SBSDIO_WATERMARK, 8, &err); 3491 SBSDIO_WATERMARK, 8, &err);
3505 3492
3506 /* Set bus state according to enable result */ 3493 /* Set bus state according to enable result */
3507 drvr->busstate = BRCMF_BUS_DATA; 3494 bus_if->state = BRCMF_BUS_DATA;
3508 } 3495 }
3509 3496
3510 else { 3497 else {
@@ -3519,7 +3506,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
3519 SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err); 3506 SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
3520 3507
3521 /* If we didn't come up, turn off backplane clock */ 3508 /* If we didn't come up, turn off backplane clock */
3522 if (drvr->busstate != BRCMF_BUS_DATA) 3509 if (bus_if->state != BRCMF_BUS_DATA)
3523 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); 3510 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3524 3511
3525exit: 3512exit:
@@ -3530,7 +3517,7 @@ exit:
3530 3517
3531void brcmf_sdbrcm_isr(void *arg) 3518void brcmf_sdbrcm_isr(void *arg)
3532{ 3519{
3533 struct brcmf_bus *bus = (struct brcmf_bus *) arg; 3520 struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
3534 3521
3535 brcmf_dbg(TRACE, "Enter\n"); 3522 brcmf_dbg(TRACE, "Enter\n");
3536 3523
@@ -3539,7 +3526,7 @@ void brcmf_sdbrcm_isr(void *arg)
3539 return; 3526 return;
3540 } 3527 }
3541 3528
3542 if (bus->drvr->busstate == BRCMF_BUS_DOWN) { 3529 if (bus->drvr->bus_if->state == BRCMF_BUS_DOWN) {
3543 brcmf_dbg(ERROR, "bus is down. we have nothing to do\n"); 3530 brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
3544 return; 3531 return;
3545 } 3532 }
@@ -3562,14 +3549,14 @@ void brcmf_sdbrcm_isr(void *arg)
3562 complete(&bus->dpc_wait); 3549 complete(&bus->dpc_wait);
3563} 3550}
3564 3551
3565static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr) 3552static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3566{ 3553{
3567 struct brcmf_bus *bus; 3554#ifdef BCMDBG
3555 struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
3556#endif /* BCMDBG */
3568 3557
3569 brcmf_dbg(TIMER, "Enter\n"); 3558 brcmf_dbg(TIMER, "Enter\n");
3570 3559
3571 bus = drvr->bus;
3572
3573 /* Ignore the timer if simulating bus down */ 3560 /* Ignore the timer if simulating bus down */
3574 if (bus->sleeping) 3561 if (bus->sleeping)
3575 return false; 3562 return false;
@@ -3613,7 +3600,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
3613 } 3600 }
3614#ifdef BCMDBG 3601#ifdef BCMDBG
3615 /* Poll for console output periodically */ 3602 /* Poll for console output periodically */
3616 if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) { 3603 if (bus_if->state == BRCMF_BUS_DATA &&
3604 bus->console_interval != 0) {
3617 bus->console.count += BRCMF_WD_POLL_MS; 3605 bus->console.count += BRCMF_WD_POLL_MS;
3618 if (bus->console.count >= bus->console_interval) { 3606 if (bus->console.count >= bus->console_interval) {
3619 bus->console.count -= bus->console_interval; 3607 bus->console.count -= bus->console_interval;
@@ -3648,10 +3636,12 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
3648{ 3636{
3649 if (chipid == BCM4329_CHIP_ID) 3637 if (chipid == BCM4329_CHIP_ID)
3650 return true; 3638 return true;
3639 if (chipid == BCM4330_CHIP_ID)
3640 return true;
3651 return false; 3641 return false;
3652} 3642}
3653 3643
3654static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus) 3644static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
3655{ 3645{
3656 brcmf_dbg(TRACE, "Enter\n"); 3646 brcmf_dbg(TRACE, "Enter\n");
3657 3647
@@ -3663,7 +3653,7 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
3663 bus->databuf = NULL; 3653 bus->databuf = NULL;
3664} 3654}
3665 3655
3666static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus) 3656static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
3667{ 3657{
3668 brcmf_dbg(TRACE, "Enter\n"); 3658 brcmf_dbg(TRACE, "Enter\n");
3669 3659
@@ -3699,7 +3689,7 @@ fail:
3699} 3689}
3700 3690
3701static bool 3691static bool
3702brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva) 3692brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3703{ 3693{
3704 u8 clkctl = 0; 3694 u8 clkctl = 0;
3705 int err = 0; 3695 int err = 0;
@@ -3784,7 +3774,7 @@ fail:
3784 return false; 3774 return false;
3785} 3775}
3786 3776
3787static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus) 3777static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
3788{ 3778{
3789 brcmf_dbg(TRACE, "Enter\n"); 3779 brcmf_dbg(TRACE, "Enter\n");
3790 3780
@@ -3792,7 +3782,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
3792 brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx, 3782 brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
3793 SDIO_FUNC_ENABLE_1, NULL); 3783 SDIO_FUNC_ENABLE_1, NULL);
3794 3784
3795 bus->drvr->busstate = BRCMF_BUS_DOWN; 3785 bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
3796 bus->sleeping = false; 3786 bus->sleeping = false;
3797 bus->rxflow = false; 3787 bus->rxflow = false;
3798 3788
@@ -3819,7 +3809,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
3819static int 3809static int
3820brcmf_sdbrcm_watchdog_thread(void *data) 3810brcmf_sdbrcm_watchdog_thread(void *data)
3821{ 3811{
3822 struct brcmf_bus *bus = (struct brcmf_bus *)data; 3812 struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3823 3813
3824 allow_signal(SIGTERM); 3814 allow_signal(SIGTERM);
3825 /* Run until signal received */ 3815 /* Run until signal received */
@@ -3827,7 +3817,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
3827 if (kthread_should_stop()) 3817 if (kthread_should_stop())
3828 break; 3818 break;
3829 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) { 3819 if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
3830 brcmf_sdbrcm_bus_watchdog(bus->drvr); 3820 brcmf_sdbrcm_bus_watchdog(bus);
3831 /* Count the tick for reference */ 3821 /* Count the tick for reference */
3832 bus->drvr->tickcnt++; 3822 bus->drvr->tickcnt++;
3833 } else 3823 } else
@@ -3839,7 +3829,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
3839static void 3829static void
3840brcmf_sdbrcm_watchdog(unsigned long data) 3830brcmf_sdbrcm_watchdog(unsigned long data)
3841{ 3831{
3842 struct brcmf_bus *bus = (struct brcmf_bus *)data; 3832 struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
3843 3833
3844 if (bus->watchdog_tsk) { 3834 if (bus->watchdog_tsk) {
3845 complete(&bus->watchdog_wait); 3835 complete(&bus->watchdog_wait);
@@ -3850,7 +3840,7 @@ brcmf_sdbrcm_watchdog(unsigned long data)
3850 } 3840 }
3851} 3841}
3852 3842
3853static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus) 3843static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
3854{ 3844{
3855 brcmf_dbg(TRACE, "Enter\n"); 3845 brcmf_dbg(TRACE, "Enter\n");
3856 3846
@@ -3867,7 +3857,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
3867} 3857}
3868 3858
3869/* Detach and free everything */ 3859/* Detach and free everything */
3870static void brcmf_sdbrcm_release(struct brcmf_bus *bus) 3860static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
3871{ 3861{
3872 brcmf_dbg(TRACE, "Enter\n"); 3862 brcmf_dbg(TRACE, "Enter\n");
3873 3863
@@ -3889,21 +3879,10 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
3889 brcmf_dbg(TRACE, "Disconnected\n"); 3879 brcmf_dbg(TRACE, "Disconnected\n");
3890} 3880}
3891 3881
3892void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype, 3882void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3893 u32 regsva, struct brcmf_sdio_dev *sdiodev)
3894{ 3883{
3895 int ret; 3884 int ret;
3896 struct brcmf_bus *bus; 3885 struct brcmf_sdio *bus;
3897
3898 /* Init global variables at run-time, not as part of the declaration.
3899 * This is required to support init/de-init of the driver.
3900 * Initialization
3901 * of globals as part of the declaration results in non-deterministic
3902 * behavior since the value of the globals may be different on the
3903 * first time that the driver is initialized vs subsequent
3904 * initializations.
3905 */
3906 brcmf_c_init();
3907 3886
3908 brcmf_dbg(TRACE, "Enter\n"); 3887 brcmf_dbg(TRACE, "Enter\n");
3909 3888
@@ -3911,7 +3890,7 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
3911 * regsva == SI_ENUM_BASE*/ 3890 * regsva == SI_ENUM_BASE*/
3912 3891
3913 /* Allocate private bus interface state */ 3892 /* Allocate private bus interface state */
3914 bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC); 3893 bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
3915 if (!bus) 3894 if (!bus)
3916 goto fail; 3895 goto fail;
3917 3896
@@ -3963,7 +3942,7 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
3963 } 3942 }
3964 3943
3965 /* Attach to the brcmf/OS/network interface */ 3944 /* Attach to the brcmf/OS/network interface */
3966 bus->drvr = brcmf_attach(bus, SDPCM_RESERVE); 3945 bus->drvr = brcmf_attach(bus, SDPCM_RESERVE, bus->sdiodev->dev);
3967 if (!bus->drvr) { 3946 if (!bus->drvr) {
3968 brcmf_dbg(ERROR, "brcmf_attach failed\n"); 3947 brcmf_dbg(ERROR, "brcmf_attach failed\n");
3969 goto fail; 3948 goto fail;
@@ -4015,7 +3994,7 @@ fail:
4015 3994
4016void brcmf_sdbrcm_disconnect(void *ptr) 3995void brcmf_sdbrcm_disconnect(void *ptr)
4017{ 3996{
4018 struct brcmf_bus *bus = (struct brcmf_bus *)ptr; 3997 struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
4019 3998
4020 brcmf_dbg(TRACE, "Enter\n"); 3999 brcmf_dbg(TRACE, "Enter\n");
4021 4000
@@ -4025,13 +4004,8 @@ void brcmf_sdbrcm_disconnect(void *ptr)
4025 brcmf_dbg(TRACE, "Disconnected\n"); 4004 brcmf_dbg(TRACE, "Disconnected\n");
4026} 4005}
4027 4006
4028struct device *brcmf_bus_get_device(struct brcmf_bus *bus)
4029{
4030 return &bus->sdiodev->func[2]->dev;
4031}
4032
4033void 4007void
4034brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick) 4008brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
4035{ 4009{
4036 /* Totally stop the timer */ 4010 /* Totally stop the timer */
4037 if (!wdtick && bus->wd_timer_valid == true) { 4011 if (!wdtick && bus->wd_timer_valid == true) {
@@ -4042,7 +4016,7 @@ brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
4042 } 4016 }
4043 4017
4044 /* don't start the wd until fw is loaded */ 4018 /* don't start the wd until fw is loaded */
4045 if (bus->drvr->busstate == BRCMF_BUS_DOWN) 4019 if (bus->drvr->bus_if->state == BRCMF_BUS_DOWN)
4046 return; 4020 return;
4047 4021
4048 if (wdtick) { 4022 if (wdtick) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index f6b1822031fe..a6048d78d294 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -59,37 +59,17 @@ struct sdiod_drive_str {
59 u8 strength; /* Pad Drive Strength in mA */ 59 u8 strength; /* Pad Drive Strength in mA */
60 u8 sel; /* Chip-specific select value */ 60 u8 sel; /* Chip-specific select value */
61}; 61};
62/* SDIO Drive Strength to sel value table for PMU Rev 1 */ 62/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
63static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = { 63static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
64 { 64 {32, 0x6},
65 4, 0x2}, { 65 {26, 0x7},
66 2, 0x3}, { 66 {22, 0x4},
67 1, 0x0}, { 67 {16, 0x5},
68 0, 0x0} 68 {12, 0x2},
69 }; 69 {8, 0x3},
70/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */ 70 {4, 0x0},
71static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = { 71 {0, 0x1}
72 { 72};
73 12, 0x7}, {
74 10, 0x6}, {
75 8, 0x5}, {
76 6, 0x4}, {
77 4, 0x2}, {
78 2, 0x1}, {
79 0, 0x0}
80 };
81/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
82static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
83 {
84 32, 0x7}, {
85 26, 0x6}, {
86 22, 0x5}, {
87 16, 0x4}, {
88 12, 0x3}, {
89 8, 0x2}, {
90 4, 0x1}, {
91 0, 0x0}
92 };
93 73
94u8 74u8
95brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid) 75brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
@@ -396,6 +376,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
396 ci->c_inf[3].base = BCM4329_CORE_ARM_BASE; 376 ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
397 ci->ramsize = BCM4329_RAMSIZE; 377 ci->ramsize = BCM4329_RAMSIZE;
398 break; 378 break;
379 case BCM4330_CHIP_ID:
380 ci->c_inf[0].wrapbase = 0x18100000;
381 ci->c_inf[0].cib = 0x27004211;
382 ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
383 ci->c_inf[1].base = 0x18002000;
384 ci->c_inf[1].wrapbase = 0x18102000;
385 ci->c_inf[1].cib = 0x07004211;
386 ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
387 ci->c_inf[2].base = 0x18004000;
388 ci->c_inf[2].wrapbase = 0x18104000;
389 ci->c_inf[2].cib = 0x0d080401;
390 ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
391 ci->c_inf[3].base = 0x18003000;
392 ci->c_inf[3].wrapbase = 0x18103000;
393 ci->c_inf[3].cib = 0x03004211;
394 ci->ramsize = 0x48000;
395 break;
399 default: 396 default:
400 brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip); 397 brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
401 return -ENODEV; 398 return -ENODEV;
@@ -569,19 +566,8 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
569 return; 566 return;
570 567
571 switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) { 568 switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
572 case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1): 569 case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
573 str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1; 570 str_tab = (struct sdiod_drive_str *)&sdiod_drvstr_tab1_1v8;
574 str_mask = 0x30000000;
575 str_shift = 28;
576 break;
577 case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
578 case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
579 str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
580 str_mask = 0x00003800;
581 str_shift = 11;
582 break;
583 case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
584 str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
585 str_mask = 0x00003800; 571 str_mask = 0x00003800;
586 str_shift = 11; 572 str_shift = 11;
587 break; 573 break;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 726fa8981113..d36a2a855a65 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -132,9 +132,10 @@ struct brcmf_sdio_dev {
132 atomic_t suspend; /* suspend flag */ 132 atomic_t suspend; /* suspend flag */
133 wait_queue_head_t request_byte_wait; 133 wait_queue_head_t request_byte_wait;
134 wait_queue_head_t request_word_wait; 134 wait_queue_head_t request_word_wait;
135 wait_queue_head_t request_packet_wait; 135 wait_queue_head_t request_chain_wait;
136 wait_queue_head_t request_buffer_wait; 136 wait_queue_head_t request_buffer_wait;
137 137 struct device *dev;
138 struct brcmf_bus *bus_if;
138}; 139};
139 140
140/* Register/deregister device interrupt handler. */ 141/* Register/deregister device interrupt handler. */
@@ -182,11 +183,21 @@ extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev);
182 * NOTE: Async operation is not currently supported. 183 * NOTE: Async operation is not currently supported.
183 */ 184 */
184extern int 185extern int
186brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
187 uint flags, struct sk_buff *pkt);
188extern int
185brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 189brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
186 uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt); 190 uint flags, u8 *buf, uint nbytes);
191
192extern int
193brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
194 uint flags, struct sk_buff *pkt);
187extern int 195extern int
188brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, 196brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
189 uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt); 197 uint flags, u8 *buf, uint nbytes);
198extern int
199brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
200 uint flags, struct sk_buff_head *pktq);
190 201
191/* Flags bits */ 202/* Flags bits */
192 203
@@ -237,16 +248,18 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
237/* read or write any buffer using cmd53 */ 248/* read or write any buffer using cmd53 */
238extern int 249extern int
239brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev, 250brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
240 uint fix_inc, uint rw, uint fnc_num, 251 uint fix_inc, uint rw, uint fnc_num, u32 addr,
241 u32 addr, uint regwidth, 252 struct sk_buff *pkt);
242 u32 buflen, u8 *buffer, struct sk_buff *pkt); 253extern int
254brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
255 uint write, uint func, uint addr,
256 struct sk_buff_head *pktq);
243 257
244/* Watchdog timer interface for pm ops */ 258/* Watchdog timer interface for pm ops */
245extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, 259extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
246 bool enable); 260 bool enable);
247 261
248extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype, 262extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
249 u32 regsva, struct brcmf_sdio_dev *sdiodev);
250extern void brcmf_sdbrcm_disconnect(void *ptr); 263extern void brcmf_sdbrcm_disconnect(void *ptr);
251extern void brcmf_sdbrcm_isr(void *arg); 264extern void brcmf_sdbrcm_isr(void *arg);
252#endif /* _BRCM_SDH_H_ */ 265#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index cc19a733ac65..f23b0c3e4ea3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1429,7 +1429,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
1429 1429
1430static s32 1430static s32
1431brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, 1431brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
1432 enum nl80211_tx_power_setting type, s32 dbm) 1432 enum nl80211_tx_power_setting type, s32 mbm)
1433{ 1433{
1434 1434
1435 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); 1435 struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
@@ -1437,6 +1437,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
1437 u16 txpwrmw; 1437 u16 txpwrmw;
1438 s32 err = 0; 1438 s32 err = 0;
1439 s32 disable = 0; 1439 s32 disable = 0;
1440 s32 dbm = MBM_TO_DBM(mbm);
1440 1441
1441 WL_TRACE("Enter\n"); 1442 WL_TRACE("Enter\n");
1442 if (!check_sys_up(wiphy)) 1443 if (!check_sys_up(wiphy))
@@ -1446,12 +1447,6 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
1446 case NL80211_TX_POWER_AUTOMATIC: 1447 case NL80211_TX_POWER_AUTOMATIC:
1447 break; 1448 break;
1448 case NL80211_TX_POWER_LIMITED: 1449 case NL80211_TX_POWER_LIMITED:
1449 if (dbm < 0) {
1450 WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
1451 err = -EINVAL;
1452 goto done;
1453 }
1454 break;
1455 case NL80211_TX_POWER_FIXED: 1450 case NL80211_TX_POWER_FIXED:
1456 if (dbm < 0) { 1451 if (dbm < 0) {
1457 WL_ERR("TX_POWER_FIXED - dbm is negative\n"); 1452 WL_ERR("TX_POWER_FIXED - dbm is negative\n");
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 39e305443d7e..ab9bb11abfbb 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -318,37 +318,13 @@
318 318
319#define BADIDX (SI_MAXCORES + 1) 319#define BADIDX (SI_MAXCORES + 1)
320 320
321/* Newer chips can access PCI/PCIE and CC core without requiring to change
322 * PCI BAR0 WIN
323 */
324#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \
325 (((si)->pub.buscoretype == PCI_CORE_ID) && \
326 (si)->pub.buscorerev >= 13))
327
328#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) + \
329 PCI_16KB0_CCREGS_OFFSET))
330
331#define IS_SIM(chippkg) \ 321#define IS_SIM(chippkg) \
332 ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID)) 322 ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
333 323
334/* 324#define PCI(sih) (ai_get_buscoretype(sih) == PCI_CORE_ID)
335 * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts 325#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID)
336 * before after core switching to avoid invalid register accesss inside ISR.
337 */
338#define INTR_OFF(si, intr_val) \
339 if ((si)->intrsoff_fn && \
340 (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
341 intr_val = (*(si)->intrsoff_fn)((si)->intr_arg)
342
343#define INTR_RESTORE(si, intr_val) \
344 if ((si)->intrsrestore_fn && \
345 (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
346 (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val)
347 326
348#define PCI(si) ((si)->pub.buscoretype == PCI_CORE_ID) 327#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
349#define PCIE(si) ((si)->pub.buscoretype == PCIE_CORE_ID)
350
351#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
352 328
353#ifdef BCMDBG 329#ifdef BCMDBG
354#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__) 330#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
@@ -360,9 +336,6 @@
360 (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \ 336 (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
361 IS_ALIGNED((x), SI_CORE_SIZE)) 337 IS_ALIGNED((x), SI_CORE_SIZE))
362 338
363#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \
364 PCI_16KB0_PCIREGS_OFFSET)
365
366struct aidmp { 339struct aidmp {
367 u32 oobselina30; /* 0x000 */ 340 u32 oobselina30; /* 0x000 */
368 u32 oobselina74; /* 0x004 */ 341 u32 oobselina74; /* 0x004 */
@@ -481,406 +454,13 @@ struct aidmp {
481 u32 componentid3; /* 0xffc */ 454 u32 componentid3; /* 0xffc */
482}; 455};
483 456
484/* EROM parsing */
485
486static u32
487get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match)
488{
489 u32 ent;
490 uint inv = 0, nom = 0;
491
492 while (true) {
493 ent = R_REG(*eromptr);
494 (*eromptr)++;
495
496 if (mask == 0)
497 break;
498
499 if ((ent & ER_VALID) == 0) {
500 inv++;
501 continue;
502 }
503
504 if (ent == (ER_END | ER_VALID))
505 break;
506
507 if ((ent & mask) == match)
508 break;
509
510 nom++;
511 }
512
513 return ent;
514}
515
516static u32
517get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st,
518 u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
519{
520 u32 asd, sz, szd;
521
522 asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
523 if (((asd & ER_TAG1) != ER_ADD) ||
524 (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
525 ((asd & AD_ST_MASK) != st)) {
526 /* This is not what we want, "push" it back */
527 (*eromptr)--;
528 return 0;
529 }
530 *addrl = asd & AD_ADDR_MASK;
531 if (asd & AD_AG32)
532 *addrh = get_erom_ent(sih, eromptr, 0, 0);
533 else
534 *addrh = 0;
535 *sizeh = 0;
536 sz = asd & AD_SZ_MASK;
537 if (sz == AD_SZ_SZD) {
538 szd = get_erom_ent(sih, eromptr, 0, 0);
539 *sizel = szd & SD_SZ_MASK;
540 if (szd & SD_SG32)
541 *sizeh = get_erom_ent(sih, eromptr, 0, 0);
542 } else
543 *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
544
545 return asd;
546}
547
548static void ai_hwfixup(struct si_info *sii)
549{
550}
551
552/* parse the enumeration rom to identify all cores */
553static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc)
554{
555 struct si_info *sii = (struct si_info *)sih;
556
557 u32 erombase;
558 u32 __iomem *eromptr, *eromlim;
559 void __iomem *regs = cc;
560
561 erombase = R_REG(&cc->eromptr);
562
563 /* Set wrappers address */
564 sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE);
565
566 /* Now point the window at the erom */
567 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
568 eromptr = regs;
569 eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
570
571 while (eromptr < eromlim) {
572 u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
573 u32 mpd, asd, addrl, addrh, sizel, sizeh;
574 u32 __iomem *base;
575 uint i, j, idx;
576 bool br;
577
578 br = false;
579
580 /* Grok a component */
581 cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
582 if (cia == (ER_END | ER_VALID)) {
583 /* Found END of erom */
584 ai_hwfixup(sii);
585 return;
586 }
587 base = eromptr - 1;
588 cib = get_erom_ent(sih, &eromptr, 0, 0);
589
590 if ((cib & ER_TAG) != ER_CI) {
591 /* CIA not followed by CIB */
592 goto error;
593 }
594
595 cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
596 mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
597 crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
598 nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
599 nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
600 nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
601 nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
602
603 if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
604 continue;
605 if ((nmw + nsw == 0)) {
606 /* A component which is not a core */
607 if (cid == OOB_ROUTER_CORE_ID) {
608 asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
609 &addrl, &addrh, &sizel, &sizeh);
610 if (asd != 0)
611 sii->oob_router = addrl;
612 }
613 continue;
614 }
615
616 idx = sii->numcores;
617/* sii->eromptr[idx] = base; */
618 sii->cia[idx] = cia;
619 sii->cib[idx] = cib;
620 sii->coreid[idx] = cid;
621
622 for (i = 0; i < nmp; i++) {
623 mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
624 if ((mpd & ER_TAG) != ER_MP) {
625 /* Not enough MP entries for component */
626 goto error;
627 }
628 }
629
630 /* First Slave Address Descriptor should be port 0:
631 * the main register space for the core
632 */
633 asd =
634 get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
635 &sizel, &sizeh);
636 if (asd == 0) {
637 /* Try again to see if it is a bridge */
638 asd =
639 get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
640 &addrh, &sizel, &sizeh);
641 if (asd != 0)
642 br = true;
643 else if ((addrh != 0) || (sizeh != 0)
644 || (sizel != SI_CORE_SIZE)) {
645 /* First Slave ASD for core malformed */
646 goto error;
647 }
648 }
649 sii->coresba[idx] = addrl;
650 sii->coresba_size[idx] = sizel;
651 /* Get any more ASDs in port 0 */
652 j = 1;
653 do {
654 asd =
655 get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
656 &addrh, &sizel, &sizeh);
657 if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
658 sii->coresba2[idx] = addrl;
659 sii->coresba2_size[idx] = sizel;
660 }
661 j++;
662 } while (asd != 0);
663
664 /* Go through the ASDs for other slave ports */
665 for (i = 1; i < nsp; i++) {
666 j = 0;
667 do {
668 asd =
669 get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
670 &addrl, &addrh, &sizel, &sizeh);
671 } while (asd != 0);
672 if (j == 0) {
673 /* SP has no address descriptors */
674 goto error;
675 }
676 }
677
678 /* Now get master wrappers */
679 for (i = 0; i < nmw; i++) {
680 asd =
681 get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
682 &addrh, &sizel, &sizeh);
683 if (asd == 0) {
684 /* Missing descriptor for MW */
685 goto error;
686 }
687 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
688 /* Master wrapper %d is not 4KB */
689 goto error;
690 }
691 if (i == 0)
692 sii->wrapba[idx] = addrl;
693 }
694
695 /* And finally slave wrappers */
696 for (i = 0; i < nsw; i++) {
697 uint fwp = (nsp == 1) ? 0 : 1;
698 asd =
699 get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
700 &addrl, &addrh, &sizel, &sizeh);
701 if (asd == 0) {
702 /* Missing descriptor for SW */
703 goto error;
704 }
705 if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
706 /* Slave wrapper is not 4KB */
707 goto error;
708 }
709 if ((nmw == 0) && (i == 0))
710 sii->wrapba[idx] = addrl;
711 }
712
713 /* Don't record bridges */
714 if (br)
715 continue;
716
717 /* Done with core */
718 sii->numcores++;
719 }
720
721 error:
722 /* Reached end of erom without finding END */
723 sii->numcores = 0;
724 return;
725}
726
727/*
728 * This function changes the logical "focus" to the indicated core.
729 * Return the current core's virtual address. Since each core starts with the
730 * same set of registers (BIST, clock control, etc), the returned address
731 * contains the first register of this 'common' register block (not to be
732 * confused with 'common core').
733 */
734void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx)
735{
736 struct si_info *sii = (struct si_info *)sih;
737 u32 addr = sii->coresba[coreidx];
738 u32 wrap = sii->wrapba[coreidx];
739
740 if (coreidx >= sii->numcores)
741 return NULL;
742
743 /* point bar0 window */
744 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
745 /* point bar0 2nd 4KB window */
746 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
747 sii->curidx = coreidx;
748
749 return sii->curmap;
750}
751
752/* Return the number of address spaces in current core */
753int ai_numaddrspaces(struct si_pub *sih)
754{
755 return 2;
756}
757
758/* Return the address of the nth address space in the current core */
759u32 ai_addrspace(struct si_pub *sih, uint asidx)
760{
761 struct si_info *sii;
762 uint cidx;
763
764 sii = (struct si_info *)sih;
765 cidx = sii->curidx;
766
767 if (asidx == 0)
768 return sii->coresba[cidx];
769 else if (asidx == 1)
770 return sii->coresba2[cidx];
771 else {
772 /* Need to parse the erom again to find addr space */
773 return 0;
774 }
775}
776
777/* Return the size of the nth address space in the current core */
778u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
779{
780 struct si_info *sii;
781 uint cidx;
782
783 sii = (struct si_info *)sih;
784 cidx = sii->curidx;
785
786 if (asidx == 0)
787 return sii->coresba_size[cidx];
788 else if (asidx == 1)
789 return sii->coresba2_size[cidx];
790 else {
791 /* Need to parse the erom again to find addr */
792 return 0;
793 }
794}
795
796uint ai_flag(struct si_pub *sih)
797{
798 struct si_info *sii;
799 struct aidmp *ai;
800
801 sii = (struct si_info *)sih;
802 ai = sii->curwrap;
803
804 return R_REG(&ai->oobselouta30) & 0x1f;
805}
806
807void ai_setint(struct si_pub *sih, int siflag)
808{
809}
810
811uint ai_corevendor(struct si_pub *sih)
812{
813 struct si_info *sii;
814 u32 cia;
815
816 sii = (struct si_info *)sih;
817 cia = sii->cia[sii->curidx];
818 return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
819}
820
821uint ai_corerev(struct si_pub *sih)
822{
823 struct si_info *sii;
824 u32 cib;
825
826 sii = (struct si_info *)sih;
827 cib = sii->cib[sii->curidx];
828 return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
829}
830
831bool ai_iscoreup(struct si_pub *sih)
832{
833 struct si_info *sii;
834 struct aidmp *ai;
835
836 sii = (struct si_info *)sih;
837 ai = sii->curwrap;
838
839 return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
840 SICF_CLOCK_EN)
841 && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
842}
843
844void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
845{
846 struct si_info *sii;
847 struct aidmp *ai;
848 u32 w;
849
850 sii = (struct si_info *)sih;
851
852 ai = sii->curwrap;
853
854 if (mask || val) {
855 w = ((R_REG(&ai->ioctrl) & ~mask) | val);
856 W_REG(&ai->ioctrl, w);
857 }
858}
859
860u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
861{
862 struct si_info *sii;
863 struct aidmp *ai;
864 u32 w;
865
866 sii = (struct si_info *)sih;
867 ai = sii->curwrap;
868
869 if (mask || val) {
870 w = ((R_REG(&ai->ioctrl) & ~mask) | val);
871 W_REG(&ai->ioctrl, w);
872 }
873
874 return R_REG(&ai->ioctrl);
875}
876
877/* return true if PCIE capability exists in the pci config space */ 457/* return true if PCIE capability exists in the pci config space */
878static bool ai_ispcie(struct si_info *sii) 458static bool ai_ispcie(struct si_info *sii)
879{ 459{
880 u8 cap_ptr; 460 u8 cap_ptr;
881 461
882 cap_ptr = 462 cap_ptr =
883 pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL, 463 pcicore_find_pci_capability(sii->pcibus, PCI_CAP_ID_EXP, NULL,
884 NULL); 464 NULL);
885 if (!cap_ptr) 465 if (!cap_ptr)
886 return false; 466 return false;
@@ -896,117 +476,69 @@ static bool ai_buscore_prep(struct si_info *sii)
896 return true; 476 return true;
897} 477}
898 478
899u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
900{
901 struct si_info *sii;
902 struct aidmp *ai;
903 u32 w;
904
905 sii = (struct si_info *)sih;
906 ai = sii->curwrap;
907
908 if (mask || val) {
909 w = ((R_REG(&ai->iostatus) & ~mask) | val);
910 W_REG(&ai->iostatus, w);
911 }
912
913 return R_REG(&ai->iostatus);
914}
915
916static bool 479static bool
917ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx) 480ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
918{ 481{
919 bool pci, pcie; 482 struct bcma_device *pci = NULL;
920 uint i; 483 struct bcma_device *pcie = NULL;
921 uint pciidx, pcieidx, pcirev, pcierev; 484 struct bcma_device *core;
922 struct chipcregs __iomem *cc;
923 485
924 cc = ai_setcoreidx(&sii->pub, SI_CC_IDX); 486
487 /* no cores found, bail out */
488 if (cc->bus->nr_cores == 0)
489 return false;
925 490
926 /* get chipcommon rev */ 491 /* get chipcommon rev */
927 sii->pub.ccrev = (int)ai_corerev(&sii->pub); 492 sii->pub.ccrev = cc->id.rev;
928 493
929 /* get chipcommon chipstatus */ 494 /* get chipcommon chipstatus */
930 if (sii->pub.ccrev >= 11) 495 if (ai_get_ccrev(&sii->pub) >= 11)
931 sii->pub.chipst = R_REG(&cc->chipstatus); 496 sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
932 497
933 /* get chipcommon capabilites */ 498 /* get chipcommon capabilites */
934 sii->pub.cccaps = R_REG(&cc->capabilities); 499 sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
935 /* get chipcommon extended capabilities */
936
937 if (sii->pub.ccrev >= 35)
938 sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
939 500
940 /* get pmu rev and caps */ 501 /* get pmu rev and caps */
941 if (sii->pub.cccaps & CC_CAP_PMU) { 502 if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
942 sii->pub.pmucaps = R_REG(&cc->pmucapabilities); 503 sii->pub.pmucaps = bcma_read32(cc,
504 CHIPCREGOFFS(pmucapabilities));
943 sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK; 505 sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
944 } 506 }
945 507
946 /* figure out bus/orignal core idx */ 508 /* figure out buscore */
947 sii->pub.buscoretype = NODEV_CORE_ID; 509 list_for_each_entry(core, &cc->bus->cores, list) {
948 sii->pub.buscorerev = NOREV;
949 sii->pub.buscoreidx = BADIDX;
950
951 pci = pcie = false;
952 pcirev = pcierev = NOREV;
953 pciidx = pcieidx = BADIDX;
954
955 for (i = 0; i < sii->numcores; i++) {
956 uint cid, crev; 510 uint cid, crev;
957 511
958 ai_setcoreidx(&sii->pub, i); 512 cid = core->id.id;
959 cid = ai_coreid(&sii->pub); 513 crev = core->id.rev;
960 crev = ai_corerev(&sii->pub);
961 514
962 if (cid == PCI_CORE_ID) { 515 if (cid == PCI_CORE_ID) {
963 pciidx = i; 516 pci = core;
964 pcirev = crev;
965 pci = true;
966 } else if (cid == PCIE_CORE_ID) { 517 } else if (cid == PCIE_CORE_ID) {
967 pcieidx = i; 518 pcie = core;
968 pcierev = crev;
969 pcie = true;
970 } 519 }
971
972 /* find the core idx before entering this func. */
973 if ((savewin && (savewin == sii->coresba[i])) ||
974 (cc == sii->regs[i]))
975 *origidx = i;
976 } 520 }
977 521
978 if (pci && pcie) { 522 if (pci && pcie) {
979 if (ai_ispcie(sii)) 523 if (ai_ispcie(sii))
980 pci = false; 524 pci = NULL;
981 else 525 else
982 pcie = false; 526 pcie = NULL;
983 } 527 }
984 if (pci) { 528 if (pci) {
985 sii->pub.buscoretype = PCI_CORE_ID; 529 sii->buscore = pci;
986 sii->pub.buscorerev = pcirev;
987 sii->pub.buscoreidx = pciidx;
988 } else if (pcie) { 530 } else if (pcie) {
989 sii->pub.buscoretype = PCIE_CORE_ID; 531 sii->buscore = pcie;
990 sii->pub.buscorerev = pcierev;
991 sii->pub.buscoreidx = pcieidx;
992 } 532 }
993 533
994 /* fixup necessary chip/core configurations */ 534 /* fixup necessary chip/core configurations */
995 if (SI_FAST(sii)) { 535 if (!sii->pch) {
996 if (!sii->pch) { 536 sii->pch = pcicore_init(&sii->pub, sii->icbus->drv_pci.core);
997 sii->pch = pcicore_init(&sii->pub, sii->pbus, 537 if (sii->pch == NULL)
998 (__iomem void *)PCIEREGS(sii)); 538 return false;
999 if (sii->pch == NULL)
1000 return false;
1001 }
1002 } 539 }
1003 if (ai_pci_fixcfg(&sii->pub)) { 540 if (ai_pci_fixcfg(&sii->pub))
1004 /* si_doattach: si_pci_fixcfg failed */
1005 return false; 541 return false;
1006 }
1007
1008 /* return to the original core */
1009 ai_setcoreidx(&sii->pub, *origidx);
1010 542
1011 return true; 543 return true;
1012} 544}
@@ -1019,39 +551,27 @@ static __used void ai_nvram_process(struct si_info *sii)
1019 uint w = 0; 551 uint w = 0;
1020 552
1021 /* do a pci config read to get subsystem id and subvendor id */ 553 /* do a pci config read to get subsystem id and subvendor id */
1022 pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w); 554 pci_read_config_dword(sii->pcibus, PCI_SUBSYSTEM_VENDOR_ID, &w);
1023 555
1024 sii->pub.boardvendor = w & 0xffff; 556 sii->pub.boardvendor = w & 0xffff;
1025 sii->pub.boardtype = (w >> 16) & 0xffff; 557 sii->pub.boardtype = (w >> 16) & 0xffff;
1026 sii->pub.boardflags = getintvar(&sii->pub, BRCMS_SROM_BOARDFLAGS);
1027} 558}
1028 559
1029static struct si_info *ai_doattach(struct si_info *sii, 560static struct si_info *ai_doattach(struct si_info *sii,
1030 void __iomem *regs, struct pci_dev *pbus) 561 struct bcma_bus *pbus)
1031{ 562{
1032 struct si_pub *sih = &sii->pub; 563 struct si_pub *sih = &sii->pub;
1033 u32 w, savewin; 564 u32 w, savewin;
1034 struct chipcregs __iomem *cc; 565 struct bcma_device *cc;
1035 uint socitype; 566 uint socitype;
1036 uint origidx;
1037
1038 memset((unsigned char *) sii, 0, sizeof(struct si_info));
1039 567
1040 savewin = 0; 568 savewin = 0;
1041 569
1042 sih->buscoreidx = BADIDX; 570 sii->icbus = pbus;
1043 571 sii->pcibus = pbus->host_pci;
1044 sii->curmap = regs;
1045 sii->pbus = pbus;
1046 572
1047 /* find Chipcommon address */ 573 /* switch to Chipcommon core */
1048 pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin); 574 cc = pbus->drv_cc.core;
1049 if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
1050 savewin = SI_ENUM_BASE;
1051
1052 pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
1053 SI_ENUM_BASE);
1054 cc = (struct chipcregs __iomem *) regs;
1055 575
1056 /* bus/core/clk setup for register access */ 576 /* bus/core/clk setup for register access */
1057 if (!ai_buscore_prep(sii)) 577 if (!ai_buscore_prep(sii))
@@ -1064,89 +584,69 @@ static struct si_info *ai_doattach(struct si_info *sii,
1064 * hosts w/o chipcommon), some way of recognizing them needs to 584 * hosts w/o chipcommon), some way of recognizing them needs to
1065 * be added here. 585 * be added here.
1066 */ 586 */
1067 w = R_REG(&cc->chipid); 587 w = bcma_read32(cc, CHIPCREGOFFS(chipid));
1068 socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT; 588 socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
1069 /* Might as wll fill in chip id rev & pkg */ 589 /* Might as wll fill in chip id rev & pkg */
1070 sih->chip = w & CID_ID_MASK; 590 sih->chip = w & CID_ID_MASK;
1071 sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT; 591 sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
1072 sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT; 592 sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
1073 593
1074 sih->issim = false;
1075
1076 /* scan for cores */ 594 /* scan for cores */
1077 if (socitype == SOCI_AI) { 595 if (socitype != SOCI_AI)
1078 SI_MSG("Found chip type AI (0x%08x)\n", w);
1079 /* pass chipc address instead of original core base */
1080 ai_scan(&sii->pub, cc);
1081 } else {
1082 /* Found chip of unknown type */
1083 return NULL;
1084 }
1085 /* no cores found, bail out */
1086 if (sii->numcores == 0)
1087 return NULL; 596 return NULL;
1088 597
1089 /* bus/core/clk setup */ 598 SI_MSG("Found chip type AI (0x%08x)\n", w);
1090 origidx = SI_CC_IDX; 599 if (!ai_buscore_setup(sii, cc))
1091 if (!ai_buscore_setup(sii, savewin, &origidx))
1092 goto exit; 600 goto exit;
1093 601
1094 /* Init nvram from sprom/otp if they exist */ 602 /* Init nvram from sprom/otp if they exist */
1095 if (srom_var_init(&sii->pub, cc)) 603 if (srom_var_init(&sii->pub))
1096 goto exit; 604 goto exit;
1097 605
1098 ai_nvram_process(sii); 606 ai_nvram_process(sii);
1099 607
1100 /* === NVRAM, clock is ready === */ 608 /* === NVRAM, clock is ready === */
1101 cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0); 609 bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0);
1102 W_REG(&cc->gpiopullup, 0); 610 bcma_write32(cc, CHIPCREGOFFS(gpiopulldown), 0);
1103 W_REG(&cc->gpiopulldown, 0);
1104 ai_setcoreidx(sih, origidx);
1105 611
1106 /* PMU specific initializations */ 612 /* PMU specific initializations */
1107 if (sih->cccaps & CC_CAP_PMU) { 613 if (ai_get_cccaps(sih) & CC_CAP_PMU) {
1108 u32 xtalfreq;
1109 si_pmu_init(sih); 614 si_pmu_init(sih);
1110 si_pmu_chip_init(sih); 615 (void)si_pmu_measure_alpclk(sih);
1111
1112 xtalfreq = si_pmu_measure_alpclk(sih);
1113 si_pmu_pll_init(sih, xtalfreq);
1114 si_pmu_res_init(sih); 616 si_pmu_res_init(sih);
1115 si_pmu_swreg_init(sih);
1116 } 617 }
1117 618
1118 /* setup the GPIO based LED powersave register */ 619 /* setup the GPIO based LED powersave register */
1119 w = getintvar(sih, BRCMS_SROM_LEDDC); 620 w = getintvar(sih, BRCMS_SROM_LEDDC);
1120 if (w == 0) 621 if (w == 0)
1121 w = DEFAULT_GPIOTIMERVAL; 622 w = DEFAULT_GPIOTIMERVAL;
1122 ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval), 623 ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
1123 ~0, w); 624 ~0, w);
1124 625
1125 if (PCIE(sii)) 626 if (PCIE(sih))
1126 pcicore_attach(sii->pch, SI_DOATTACH); 627 pcicore_attach(sii->pch, SI_DOATTACH);
1127 628
1128 if (sih->chip == BCM43224_CHIP_ID) { 629 if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
1129 /* 630 /*
1130 * enable 12 mA drive strenth for 43224 and 631 * enable 12 mA drive strenth for 43224 and
1131 * set chipControl register bit 15 632 * set chipControl register bit 15
1132 */ 633 */
1133 if (sih->chiprev == 0) { 634 if (ai_get_chiprev(sih) == 0) {
1134 SI_MSG("Applying 43224A0 WARs\n"); 635 SI_MSG("Applying 43224A0 WARs\n");
1135 ai_corereg(sih, SI_CC_IDX, 636 ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol),
1136 offsetof(struct chipcregs, chipcontrol), 637 CCTRL43224_GPIO_TOGGLE,
1137 CCTRL43224_GPIO_TOGGLE, 638 CCTRL43224_GPIO_TOGGLE);
1138 CCTRL43224_GPIO_TOGGLE);
1139 si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE, 639 si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
1140 CCTRL_43224A0_12MA_LED_DRIVE); 640 CCTRL_43224A0_12MA_LED_DRIVE);
1141 } 641 }
1142 if (sih->chiprev >= 1) { 642 if (ai_get_chiprev(sih) >= 1) {
1143 SI_MSG("Applying 43224B0+ WARs\n"); 643 SI_MSG("Applying 43224B0+ WARs\n");
1144 si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE, 644 si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
1145 CCTRL_43224B0_12MA_LED_DRIVE); 645 CCTRL_43224B0_12MA_LED_DRIVE);
1146 } 646 }
1147 } 647 }
1148 648
1149 if (sih->chip == BCM4313_CHIP_ID) { 649 if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
1150 /* 650 /*
1151 * enable 12 mA drive strenth for 4313 and 651 * enable 12 mA drive strenth for 4313 and
1152 * set chipControl register bit 1 652 * set chipControl register bit 1
@@ -1167,22 +667,19 @@ static struct si_info *ai_doattach(struct si_info *sii,
1167} 667}
1168 668
1169/* 669/*
1170 * Allocate a si handle. 670 * Allocate a si handle and do the attach.
1171 * devid - pci device id (used to determine chip#)
1172 * osh - opaque OS handle
1173 * regs - virtual address of initial core registers
1174 */ 671 */
1175struct si_pub * 672struct si_pub *
1176ai_attach(void __iomem *regs, struct pci_dev *sdh) 673ai_attach(struct bcma_bus *pbus)
1177{ 674{
1178 struct si_info *sii; 675 struct si_info *sii;
1179 676
1180 /* alloc struct si_info */ 677 /* alloc struct si_info */
1181 sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC); 678 sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC);
1182 if (sii == NULL) 679 if (sii == NULL)
1183 return NULL; 680 return NULL;
1184 681
1185 if (ai_doattach(sii, regs, sdh) == NULL) { 682 if (ai_doattach(sii, pbus) == NULL) {
1186 kfree(sii); 683 kfree(sii);
1187 return NULL; 684 return NULL;
1188 } 685 }
@@ -1211,292 +708,66 @@ void ai_detach(struct si_pub *sih)
1211 kfree(sii); 708 kfree(sii);
1212} 709}
1213 710
1214/* register driver interrupt disabling and restoring callback functions */
1215void
1216ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
1217 void *intrsrestore_fn,
1218 void *intrsenabled_fn, void *intr_arg)
1219{
1220 struct si_info *sii;
1221
1222 sii = (struct si_info *)sih;
1223 sii->intr_arg = intr_arg;
1224 sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn;
1225 sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn;
1226 sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn;
1227 /* save current core id. when this function called, the current core
1228 * must be the core which provides driver functions(il, et, wl, etc.)
1229 */
1230 sii->dev_coreid = sii->coreid[sii->curidx];
1231}
1232
1233void ai_deregister_intr_callback(struct si_pub *sih)
1234{
1235 struct si_info *sii;
1236
1237 sii = (struct si_info *)sih;
1238 sii->intrsoff_fn = NULL;
1239}
1240
1241uint ai_coreid(struct si_pub *sih)
1242{
1243 struct si_info *sii;
1244
1245 sii = (struct si_info *)sih;
1246 return sii->coreid[sii->curidx];
1247}
1248
1249uint ai_coreidx(struct si_pub *sih)
1250{
1251 struct si_info *sii;
1252
1253 sii = (struct si_info *)sih;
1254 return sii->curidx;
1255}
1256
1257bool ai_backplane64(struct si_pub *sih)
1258{
1259 return (sih->cccaps & CC_CAP_BKPLN64) != 0;
1260}
1261
1262/* return index of coreid or BADIDX if not found */ 711/* return index of coreid or BADIDX if not found */
1263uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit) 712struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
1264{ 713{
714 struct bcma_device *core;
1265 struct si_info *sii; 715 struct si_info *sii;
1266 uint found; 716 uint found;
1267 uint i;
1268 717
1269 sii = (struct si_info *)sih; 718 sii = (struct si_info *)sih;
1270 719
1271 found = 0; 720 found = 0;
1272 721
1273 for (i = 0; i < sii->numcores; i++) 722 list_for_each_entry(core, &sii->icbus->cores, list)
1274 if (sii->coreid[i] == coreid) { 723 if (core->id.id == coreid) {
1275 if (found == coreunit) 724 if (found == coreunit)
1276 return i; 725 return core;
1277 found++; 726 found++;
1278 } 727 }
1279 728
1280 return BADIDX; 729 return NULL;
1281}
1282
1283/*
1284 * This function changes logical "focus" to the indicated core;
1285 * must be called with interrupts off.
1286 * Moreover, callers should keep interrupts off during switching
1287 * out of and back to d11 core.
1288 */
1289void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
1290{
1291 uint idx;
1292
1293 idx = ai_findcoreidx(sih, coreid, coreunit);
1294 if (idx >= SI_MAXCORES)
1295 return NULL;
1296
1297 return ai_setcoreidx(sih, idx);
1298}
1299
1300/* Turn off interrupt as required by ai_setcore, before switch core */
1301void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
1302 uint *intr_val)
1303{
1304 void __iomem *cc;
1305 struct si_info *sii;
1306
1307 sii = (struct si_info *)sih;
1308
1309 if (SI_FAST(sii)) {
1310 /* Overloading the origidx variable to remember the coreid,
1311 * this works because the core ids cannot be confused with
1312 * core indices.
1313 */
1314 *origidx = coreid;
1315 if (coreid == CC_CORE_ID)
1316 return CCREGS_FAST(sii);
1317 else if (coreid == sih->buscoretype)
1318 return PCIEREGS(sii);
1319 }
1320 INTR_OFF(sii, *intr_val);
1321 *origidx = sii->curidx;
1322 cc = ai_setcore(sih, coreid, 0);
1323 return cc;
1324}
1325
1326/* restore coreidx and restore interrupt */
1327void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
1328{
1329 struct si_info *sii;
1330
1331 sii = (struct si_info *)sih;
1332 if (SI_FAST(sii)
1333 && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
1334 return;
1335
1336 ai_setcoreidx(sih, coreid);
1337 INTR_RESTORE(sii, intr_val);
1338}
1339
1340void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
1341{
1342 struct si_info *sii = (struct si_info *)sih;
1343 u32 *w = (u32 *) sii->curwrap;
1344 W_REG(w + (offset / 4), val);
1345 return;
1346} 730}
1347 731
1348/* 732/*
1349 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set 733 * read/modify chipcommon core register.
1350 * operation, switch back to the original core, and return the new value.
1351 *
1352 * When using the silicon backplane, no fiddling with interrupts or core
1353 * switches is needed.
1354 *
1355 * Also, when using pci/pcie, we can optimize away the core switching for pci
1356 * registers and (on newer pci cores) chipcommon registers.
1357 */ 734 */
1358uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask, 735uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
1359 uint val)
1360{ 736{
1361 uint origidx = 0; 737 struct bcma_device *cc;
1362 u32 __iomem *r = NULL; 738 u32 w;
1363 uint w;
1364 uint intr_val = 0;
1365 bool fast = false;
1366 struct si_info *sii; 739 struct si_info *sii;
1367 740
1368 sii = (struct si_info *)sih; 741 sii = (struct si_info *)sih;
1369 742 cc = sii->icbus->drv_cc.core;
1370 if (coreidx >= SI_MAXCORES)
1371 return 0;
1372
1373 /*
1374 * If pci/pcie, we can get at pci/pcie regs
1375 * and on newer cores to chipc
1376 */
1377 if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
1378 /* Chipc registers are mapped at 12KB */
1379 fast = true;
1380 r = (u32 __iomem *)((__iomem char *)sii->curmap +
1381 PCI_16KB0_CCREGS_OFFSET + regoff);
1382 } else if (sii->pub.buscoreidx == coreidx) {
1383 /*
1384 * pci registers are at either in the last 2KB of
1385 * an 8KB window or, in pcie and pci rev 13 at 8KB
1386 */
1387 fast = true;
1388 if (SI_FAST(sii))
1389 r = (u32 __iomem *)((__iomem char *)sii->curmap +
1390 PCI_16KB0_PCIREGS_OFFSET + regoff);
1391 else
1392 r = (u32 __iomem *)((__iomem char *)sii->curmap +
1393 ((regoff >= SBCONFIGOFF) ?
1394 PCI_BAR0_PCISBR_OFFSET :
1395 PCI_BAR0_PCIREGS_OFFSET) + regoff);
1396 }
1397
1398 if (!fast) {
1399 INTR_OFF(sii, intr_val);
1400
1401 /* save current core index */
1402 origidx = ai_coreidx(&sii->pub);
1403
1404 /* switch core */
1405 r = (u32 __iomem *) ((unsigned char __iomem *)
1406 ai_setcoreidx(&sii->pub, coreidx) + regoff);
1407 }
1408 743
1409 /* mask and set */ 744 /* mask and set */
1410 if (mask || val) { 745 if (mask || val) {
1411 w = (R_REG(r) & ~mask) | val; 746 bcma_maskset32(cc, regoff, ~mask, val);
1412 W_REG(r, w);
1413 } 747 }
1414 748
1415 /* readback */ 749 /* readback */
1416 w = R_REG(r); 750 w = bcma_read32(cc, regoff);
1417
1418 if (!fast) {
1419 /* restore core index */
1420 if (origidx != coreidx)
1421 ai_setcoreidx(&sii->pub, origidx);
1422
1423 INTR_RESTORE(sii, intr_val);
1424 }
1425 751
1426 return w; 752 return w;
1427} 753}
1428 754
1429void ai_core_disable(struct si_pub *sih, u32 bits)
1430{
1431 struct si_info *sii;
1432 u32 dummy;
1433 struct aidmp *ai;
1434
1435 sii = (struct si_info *)sih;
1436
1437 ai = sii->curwrap;
1438
1439 /* if core is already in reset, just return */
1440 if (R_REG(&ai->resetctrl) & AIRC_RESET)
1441 return;
1442
1443 W_REG(&ai->ioctrl, bits);
1444 dummy = R_REG(&ai->ioctrl);
1445 udelay(10);
1446
1447 W_REG(&ai->resetctrl, AIRC_RESET);
1448 udelay(1);
1449}
1450
1451/* reset and re-enable a core
1452 * inputs:
1453 * bits - core specific bits that are set during and after reset sequence
1454 * resetbits - core specific bits that are set only during reset sequence
1455 */
1456void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
1457{
1458 struct si_info *sii;
1459 struct aidmp *ai;
1460 u32 dummy;
1461
1462 sii = (struct si_info *)sih;
1463 ai = sii->curwrap;
1464
1465 /*
1466 * Must do the disable sequence first to work
1467 * for arbitrary current core state.
1468 */
1469 ai_core_disable(sih, (bits | resetbits));
1470
1471 /*
1472 * Now do the initialization sequence.
1473 */
1474 W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
1475 dummy = R_REG(&ai->ioctrl);
1476 W_REG(&ai->resetctrl, 0);
1477 udelay(1);
1478
1479 W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
1480 dummy = R_REG(&ai->ioctrl);
1481 udelay(1);
1482}
1483
1484/* return the slow clock source - LPO, XTAL, or PCI */ 755/* return the slow clock source - LPO, XTAL, or PCI */
1485static uint ai_slowclk_src(struct si_info *sii) 756static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc)
1486{ 757{
1487 struct chipcregs __iomem *cc; 758 struct si_info *sii;
1488 u32 val; 759 u32 val;
1489 760
1490 if (sii->pub.ccrev < 6) { 761 sii = (struct si_info *)sih;
1491 pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, 762 if (ai_get_ccrev(&sii->pub) < 6) {
763 pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT,
1492 &val); 764 &val);
1493 if (val & PCI_CFG_GPIO_SCS) 765 if (val & PCI_CFG_GPIO_SCS)
1494 return SCC_SS_PCI; 766 return SCC_SS_PCI;
1495 return SCC_SS_XTAL; 767 return SCC_SS_XTAL;
1496 } else if (sii->pub.ccrev < 10) { 768 } else if (ai_get_ccrev(&sii->pub) < 10) {
1497 cc = (struct chipcregs __iomem *) 769 return bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
1498 ai_setcoreidx(&sii->pub, sii->curidx); 770 SCC_SS_MASK;
1499 return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
1500 } else /* Insta-clock */ 771 } else /* Insta-clock */
1501 return SCC_SS_XTAL; 772 return SCC_SS_XTAL;
1502} 773}
@@ -1505,24 +776,24 @@ static uint ai_slowclk_src(struct si_info *sii)
1505* return the ILP (slowclock) min or max frequency 776* return the ILP (slowclock) min or max frequency
1506* precondition: we've established the chip has dynamic clk control 777* precondition: we've established the chip has dynamic clk control
1507*/ 778*/
1508static uint ai_slowclk_freq(struct si_info *sii, bool max_freq, 779static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq,
1509 struct chipcregs __iomem *cc) 780 struct bcma_device *cc)
1510{ 781{
1511 u32 slowclk; 782 u32 slowclk;
1512 uint div; 783 uint div;
1513 784
1514 slowclk = ai_slowclk_src(sii); 785 slowclk = ai_slowclk_src(sih, cc);
1515 if (sii->pub.ccrev < 6) { 786 if (ai_get_ccrev(sih) < 6) {
1516 if (slowclk == SCC_SS_PCI) 787 if (slowclk == SCC_SS_PCI)
1517 return max_freq ? (PCIMAXFREQ / 64) 788 return max_freq ? (PCIMAXFREQ / 64)
1518 : (PCIMINFREQ / 64); 789 : (PCIMINFREQ / 64);
1519 else 790 else
1520 return max_freq ? (XTALMAXFREQ / 32) 791 return max_freq ? (XTALMAXFREQ / 32)
1521 : (XTALMINFREQ / 32); 792 : (XTALMINFREQ / 32);
1522 } else if (sii->pub.ccrev < 10) { 793 } else if (ai_get_ccrev(sih) < 10) {
1523 div = 4 * 794 div = 4 *
1524 (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >> 795 (((bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
1525 SCC_CD_SHIFT) + 1); 796 SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
1526 if (slowclk == SCC_SS_LPO) 797 if (slowclk == SCC_SS_LPO)
1527 return max_freq ? LPOMAXFREQ : LPOMINFREQ; 798 return max_freq ? LPOMAXFREQ : LPOMINFREQ;
1528 else if (slowclk == SCC_SS_XTAL) 799 else if (slowclk == SCC_SS_XTAL)
@@ -1533,15 +804,15 @@ static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
1533 : (PCIMINFREQ / div); 804 : (PCIMINFREQ / div);
1534 } else { 805 } else {
1535 /* Chipc rev 10 is InstaClock */ 806 /* Chipc rev 10 is InstaClock */
1536 div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT; 807 div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl));
1537 div = 4 * (div + 1); 808 div = 4 * ((div >> SYCC_CD_SHIFT) + 1);
1538 return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div); 809 return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
1539 } 810 }
1540 return 0; 811 return 0;
1541} 812}
1542 813
1543static void 814static void
1544ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc) 815ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
1545{ 816{
1546 uint slowmaxfreq, pll_delay, slowclk; 817 uint slowmaxfreq, pll_delay, slowclk;
1547 uint pll_on_delay, fref_sel_delay; 818 uint pll_on_delay, fref_sel_delay;
@@ -1554,55 +825,40 @@ ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
1554 * powered down by dynamic clk control logic. 825 * powered down by dynamic clk control logic.
1555 */ 826 */
1556 827
1557 slowclk = ai_slowclk_src(sii); 828 slowclk = ai_slowclk_src(sih, cc);
1558 if (slowclk != SCC_SS_XTAL) 829 if (slowclk != SCC_SS_XTAL)
1559 pll_delay += XTAL_ON_DELAY; 830 pll_delay += XTAL_ON_DELAY;
1560 831
1561 /* Starting with 4318 it is ILP that is used for the delays */ 832 /* Starting with 4318 it is ILP that is used for the delays */
1562 slowmaxfreq = 833 slowmaxfreq =
1563 ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc); 834 ai_slowclk_freq(sih,
835 (ai_get_ccrev(sih) >= 10) ? false : true, cc);
1564 836
1565 pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000; 837 pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
1566 fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000; 838 fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
1567 839
1568 W_REG(&cc->pll_on_delay, pll_on_delay); 840 bcma_write32(cc, CHIPCREGOFFS(pll_on_delay), pll_on_delay);
1569 W_REG(&cc->fref_sel_delay, fref_sel_delay); 841 bcma_write32(cc, CHIPCREGOFFS(fref_sel_delay), fref_sel_delay);
1570} 842}
1571 843
1572/* initialize power control delay registers */ 844/* initialize power control delay registers */
1573void ai_clkctl_init(struct si_pub *sih) 845void ai_clkctl_init(struct si_pub *sih)
1574{ 846{
1575 struct si_info *sii; 847 struct bcma_device *cc;
1576 uint origidx = 0;
1577 struct chipcregs __iomem *cc;
1578 bool fast;
1579 848
1580 if (!(sih->cccaps & CC_CAP_PWR_CTL)) 849 if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
1581 return; 850 return;
1582 851
1583 sii = (struct si_info *)sih; 852 cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
1584 fast = SI_FAST(sii); 853 if (cc == NULL)
1585 if (!fast) { 854 return;
1586 origidx = sii->curidx;
1587 cc = (struct chipcregs __iomem *)
1588 ai_setcore(sih, CC_CORE_ID, 0);
1589 if (cc == NULL)
1590 return;
1591 } else {
1592 cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
1593 if (cc == NULL)
1594 return;
1595 }
1596 855
1597 /* set all Instaclk chip ILP to 1 MHz */ 856 /* set all Instaclk chip ILP to 1 MHz */
1598 if (sih->ccrev >= 10) 857 if (ai_get_ccrev(sih) >= 10)
1599 SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK, 858 bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK,
1600 (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); 859 (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
1601 860
1602 ai_clkctl_setdelay(sii, cc); 861 ai_clkctl_setdelay(sih, cc);
1603
1604 if (!fast)
1605 ai_setcoreidx(sih, origidx);
1606} 862}
1607 863
1608/* 864/*
@@ -1612,47 +868,25 @@ void ai_clkctl_init(struct si_pub *sih)
1612u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih) 868u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
1613{ 869{
1614 struct si_info *sii; 870 struct si_info *sii;
1615 uint origidx = 0; 871 struct bcma_device *cc;
1616 struct chipcregs __iomem *cc;
1617 uint slowminfreq; 872 uint slowminfreq;
1618 u16 fpdelay; 873 u16 fpdelay;
1619 uint intr_val = 0;
1620 bool fast;
1621 874
1622 sii = (struct si_info *)sih; 875 sii = (struct si_info *)sih;
1623 if (sih->cccaps & CC_CAP_PMU) { 876 if (ai_get_cccaps(sih) & CC_CAP_PMU) {
1624 INTR_OFF(sii, intr_val);
1625 fpdelay = si_pmu_fast_pwrup_delay(sih); 877 fpdelay = si_pmu_fast_pwrup_delay(sih);
1626 INTR_RESTORE(sii, intr_val);
1627 return fpdelay; 878 return fpdelay;
1628 } 879 }
1629 880
1630 if (!(sih->cccaps & CC_CAP_PWR_CTL)) 881 if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
1631 return 0; 882 return 0;
1632 883
1633 fast = SI_FAST(sii);
1634 fpdelay = 0; 884 fpdelay = 0;
1635 if (!fast) { 885 cc = ai_findcore(sih, CC_CORE_ID, 0);
1636 origidx = sii->curidx; 886 if (cc) {
1637 INTR_OFF(sii, intr_val); 887 slowminfreq = ai_slowclk_freq(sih, false, cc);
1638 cc = (struct chipcregs __iomem *) 888 fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2)
1639 ai_setcore(sih, CC_CORE_ID, 0); 889 * 1000000) + (slowminfreq - 1)) / slowminfreq;
1640 if (cc == NULL)
1641 goto done;
1642 } else {
1643 cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
1644 if (cc == NULL)
1645 goto done;
1646 }
1647
1648 slowminfreq = ai_slowclk_freq(sii, false, cc);
1649 fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
1650 (slowminfreq - 1)) / slowminfreq;
1651
1652 done:
1653 if (!fast) {
1654 ai_setcoreidx(sih, origidx);
1655 INTR_RESTORE(sii, intr_val);
1656 } 890 }
1657 return fpdelay; 891 return fpdelay;
1658} 892}
@@ -1666,12 +900,12 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
1666 sii = (struct si_info *)sih; 900 sii = (struct si_info *)sih;
1667 901
1668 /* pcie core doesn't have any mapping to control the xtal pu */ 902 /* pcie core doesn't have any mapping to control the xtal pu */
1669 if (PCIE(sii)) 903 if (PCIE(sih))
1670 return -1; 904 return -1;
1671 905
1672 pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in); 906 pci_read_config_dword(sii->pcibus, PCI_GPIO_IN, &in);
1673 pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out); 907 pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &out);
1674 pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen); 908 pci_read_config_dword(sii->pcibus, PCI_GPIO_OUTEN, &outen);
1675 909
1676 /* 910 /*
1677 * Avoid glitching the clock if GPRS is already using it. 911 * Avoid glitching the clock if GPRS is already using it.
@@ -1692,9 +926,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
1692 out |= PCI_CFG_GPIO_XTAL; 926 out |= PCI_CFG_GPIO_XTAL;
1693 if (what & PLL) 927 if (what & PLL)
1694 out |= PCI_CFG_GPIO_PLL; 928 out |= PCI_CFG_GPIO_PLL;
1695 pci_write_config_dword(sii->pbus, 929 pci_write_config_dword(sii->pcibus,
1696 PCI_GPIO_OUT, out); 930 PCI_GPIO_OUT, out);
1697 pci_write_config_dword(sii->pbus, 931 pci_write_config_dword(sii->pcibus,
1698 PCI_GPIO_OUTEN, outen); 932 PCI_GPIO_OUTEN, outen);
1699 udelay(XTAL_ON_DELAY); 933 udelay(XTAL_ON_DELAY);
1700 } 934 }
@@ -1702,7 +936,7 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
1702 /* turn pll on */ 936 /* turn pll on */
1703 if (what & PLL) { 937 if (what & PLL) {
1704 out &= ~PCI_CFG_GPIO_PLL; 938 out &= ~PCI_CFG_GPIO_PLL;
1705 pci_write_config_dword(sii->pbus, 939 pci_write_config_dword(sii->pcibus,
1706 PCI_GPIO_OUT, out); 940 PCI_GPIO_OUT, out);
1707 mdelay(2); 941 mdelay(2);
1708 } 942 }
@@ -1711,9 +945,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
1711 out &= ~PCI_CFG_GPIO_XTAL; 945 out &= ~PCI_CFG_GPIO_XTAL;
1712 if (what & PLL) 946 if (what & PLL)
1713 out |= PCI_CFG_GPIO_PLL; 947 out |= PCI_CFG_GPIO_PLL;
1714 pci_write_config_dword(sii->pbus, 948 pci_write_config_dword(sii->pcibus,
1715 PCI_GPIO_OUT, out); 949 PCI_GPIO_OUT, out);
1716 pci_write_config_dword(sii->pbus, 950 pci_write_config_dword(sii->pcibus,
1717 PCI_GPIO_OUTEN, outen); 951 PCI_GPIO_OUTEN, outen);
1718 } 952 }
1719 953
@@ -1723,63 +957,52 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
1723/* clk control mechanism through chipcommon, no policy checking */ 957/* clk control mechanism through chipcommon, no policy checking */
1724static bool _ai_clkctl_cc(struct si_info *sii, uint mode) 958static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
1725{ 959{
1726 uint origidx = 0; 960 struct bcma_device *cc;
1727 struct chipcregs __iomem *cc;
1728 u32 scc; 961 u32 scc;
1729 uint intr_val = 0;
1730 bool fast = SI_FAST(sii);
1731 962
1732 /* chipcommon cores prior to rev6 don't support dynamic clock control */ 963 /* chipcommon cores prior to rev6 don't support dynamic clock control */
1733 if (sii->pub.ccrev < 6) 964 if (ai_get_ccrev(&sii->pub) < 6)
1734 return false; 965 return false;
1735 966
1736 if (!fast) { 967 cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
1737 INTR_OFF(sii, intr_val);
1738 origidx = sii->curidx;
1739 cc = (struct chipcregs __iomem *)
1740 ai_setcore(&sii->pub, CC_CORE_ID, 0);
1741 } else {
1742 cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
1743 if (cc == NULL)
1744 goto done;
1745 }
1746 968
1747 if (!(sii->pub.cccaps & CC_CAP_PWR_CTL) && (sii->pub.ccrev < 20)) 969 if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) &&
1748 goto done; 970 (ai_get_ccrev(&sii->pub) < 20))
971 return mode == CLK_FAST;
1749 972
1750 switch (mode) { 973 switch (mode) {
1751 case CLK_FAST: /* FORCEHT, fast (pll) clock */ 974 case CLK_FAST: /* FORCEHT, fast (pll) clock */
1752 if (sii->pub.ccrev < 10) { 975 if (ai_get_ccrev(&sii->pub) < 10) {
1753 /* 976 /*
1754 * don't forget to force xtal back 977 * don't forget to force xtal back
1755 * on before we clear SCC_DYN_XTAL.. 978 * on before we clear SCC_DYN_XTAL..
1756 */ 979 */
1757 ai_clkctl_xtal(&sii->pub, XTAL, ON); 980 ai_clkctl_xtal(&sii->pub, XTAL, ON);
1758 SET_REG(&cc->slow_clk_ctl, 981 bcma_maskset32(cc, CHIPCREGOFFS(slow_clk_ctl),
1759 (SCC_XC | SCC_FS | SCC_IP), SCC_IP); 982 (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
1760 } else if (sii->pub.ccrev < 20) { 983 } else if (ai_get_ccrev(&sii->pub) < 20) {
1761 OR_REG(&cc->system_clk_ctl, SYCC_HR); 984 bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR);
1762 } else { 985 } else {
1763 OR_REG(&cc->clk_ctl_st, CCS_FORCEHT); 986 bcma_set32(cc, CHIPCREGOFFS(clk_ctl_st), CCS_FORCEHT);
1764 } 987 }
1765 988
1766 /* wait for the PLL */ 989 /* wait for the PLL */
1767 if (sii->pub.cccaps & CC_CAP_PMU) { 990 if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
1768 u32 htavail = CCS_HTAVAIL; 991 u32 htavail = CCS_HTAVAIL;
1769 SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail) 992 SPINWAIT(((bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st)) &
1770 == 0), PMU_MAX_TRANSITION_DLY); 993 htavail) == 0), PMU_MAX_TRANSITION_DLY);
1771 } else { 994 } else {
1772 udelay(PLL_DELAY); 995 udelay(PLL_DELAY);
1773 } 996 }
1774 break; 997 break;
1775 998
1776 case CLK_DYNAMIC: /* enable dynamic clock control */ 999 case CLK_DYNAMIC: /* enable dynamic clock control */
1777 if (sii->pub.ccrev < 10) { 1000 if (ai_get_ccrev(&sii->pub) < 10) {
1778 scc = R_REG(&cc->slow_clk_ctl); 1001 scc = bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl));
1779 scc &= ~(SCC_FS | SCC_IP | SCC_XC); 1002 scc &= ~(SCC_FS | SCC_IP | SCC_XC);
1780 if ((scc & SCC_SS_MASK) != SCC_SS_XTAL) 1003 if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
1781 scc |= SCC_XC; 1004 scc |= SCC_XC;
1782 W_REG(&cc->slow_clk_ctl, scc); 1005 bcma_write32(cc, CHIPCREGOFFS(slow_clk_ctl), scc);
1783 1006
1784 /* 1007 /*
1785 * for dynamic control, we have to 1008 * for dynamic control, we have to
@@ -1787,11 +1010,11 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
1787 */ 1010 */
1788 if (scc & SCC_XC) 1011 if (scc & SCC_XC)
1789 ai_clkctl_xtal(&sii->pub, XTAL, OFF); 1012 ai_clkctl_xtal(&sii->pub, XTAL, OFF);
1790 } else if (sii->pub.ccrev < 20) { 1013 } else if (ai_get_ccrev(&sii->pub) < 20) {
1791 /* Instaclock */ 1014 /* Instaclock */
1792 AND_REG(&cc->system_clk_ctl, ~SYCC_HR); 1015 bcma_mask32(cc, CHIPCREGOFFS(system_clk_ctl), ~SYCC_HR);
1793 } else { 1016 } else {
1794 AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT); 1017 bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT);
1795 } 1018 }
1796 break; 1019 break;
1797 1020
@@ -1799,11 +1022,6 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
1799 break; 1022 break;
1800 } 1023 }
1801 1024
1802 done:
1803 if (!fast) {
1804 ai_setcoreidx(&sii->pub, origidx);
1805 INTR_RESTORE(sii, intr_val);
1806 }
1807 return mode == CLK_FAST; 1025 return mode == CLK_FAST;
1808} 1026}
1809 1027
@@ -1822,46 +1040,25 @@ bool ai_clkctl_cc(struct si_pub *sih, uint mode)
1822 sii = (struct si_info *)sih; 1040 sii = (struct si_info *)sih;
1823 1041
1824 /* chipcommon cores prior to rev6 don't support dynamic clock control */ 1042 /* chipcommon cores prior to rev6 don't support dynamic clock control */
1825 if (sih->ccrev < 6) 1043 if (ai_get_ccrev(sih) < 6)
1826 return false; 1044 return false;
1827 1045
1828 if (PCI_FORCEHT(sii)) 1046 if (PCI_FORCEHT(sih))
1829 return mode == CLK_FAST; 1047 return mode == CLK_FAST;
1830 1048
1831 return _ai_clkctl_cc(sii, mode); 1049 return _ai_clkctl_cc(sii, mode);
1832} 1050}
1833 1051
1834/* Build device path */
1835int ai_devpath(struct si_pub *sih, char *path, int size)
1836{
1837 int slen;
1838
1839 if (!path || size <= 0)
1840 return -1;
1841
1842 slen = snprintf(path, (size_t) size, "pci/%u/%u/",
1843 ((struct si_info *)sih)->pbus->bus->number,
1844 PCI_SLOT(((struct pci_dev *)
1845 (((struct si_info *)(sih))->pbus))->devfn));
1846
1847 if (slen < 0 || slen >= size) {
1848 path[0] = '\0';
1849 return -1;
1850 }
1851
1852 return 0;
1853}
1854
1855void ai_pci_up(struct si_pub *sih) 1052void ai_pci_up(struct si_pub *sih)
1856{ 1053{
1857 struct si_info *sii; 1054 struct si_info *sii;
1858 1055
1859 sii = (struct si_info *)sih; 1056 sii = (struct si_info *)sih;
1860 1057
1861 if (PCI_FORCEHT(sii)) 1058 if (PCI_FORCEHT(sih))
1862 _ai_clkctl_cc(sii, CLK_FAST); 1059 _ai_clkctl_cc(sii, CLK_FAST);
1863 1060
1864 if (PCIE(sii)) 1061 if (PCIE(sih))
1865 pcicore_up(sii->pch, SI_PCIUP); 1062 pcicore_up(sii->pch, SI_PCIUP);
1866 1063
1867} 1064}
@@ -1884,7 +1081,7 @@ void ai_pci_down(struct si_pub *sih)
1884 sii = (struct si_info *)sih; 1081 sii = (struct si_info *)sih;
1885 1082
1886 /* release FORCEHT since chip is going to "down" state */ 1083 /* release FORCEHT since chip is going to "down" state */
1887 if (PCI_FORCEHT(sii)) 1084 if (PCI_FORCEHT(sih))
1888 _ai_clkctl_cc(sii, CLK_DYNAMIC); 1085 _ai_clkctl_cc(sii, CLK_DYNAMIC);
1889 1086
1890 pcicore_down(sii->pch, SI_PCIDOWN); 1087 pcicore_down(sii->pch, SI_PCIDOWN);
@@ -1897,42 +1094,23 @@ void ai_pci_down(struct si_pub *sih)
1897void ai_pci_setup(struct si_pub *sih, uint coremask) 1094void ai_pci_setup(struct si_pub *sih, uint coremask)
1898{ 1095{
1899 struct si_info *sii; 1096 struct si_info *sii;
1900 struct sbpciregs __iomem *regs = NULL; 1097 u32 w;
1901 u32 siflag = 0, w;
1902 uint idx = 0;
1903 1098
1904 sii = (struct si_info *)sih; 1099 sii = (struct si_info *)sih;
1905 1100
1906 if (PCI(sii)) {
1907 /* get current core index */
1908 idx = sii->curidx;
1909
1910 /* we interrupt on this backplane flag number */
1911 siflag = ai_flag(sih);
1912
1913 /* switch over to pci core */
1914 regs = ai_setcoreidx(sih, sii->pub.buscoreidx);
1915 }
1916
1917 /* 1101 /*
1918 * Enable sb->pci interrupts. Assume 1102 * Enable sb->pci interrupts. Assume
1919 * PCI rev 2.3 support was added in pci core rev 6 and things changed.. 1103 * PCI rev 2.3 support was added in pci core rev 6 and things changed..
1920 */ 1104 */
1921 if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) { 1105 if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) {
1922 /* pci config write to set this core bit in PCIIntMask */ 1106 /* pci config write to set this core bit in PCIIntMask */
1923 pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w); 1107 pci_read_config_dword(sii->pcibus, PCI_INT_MASK, &w);
1924 w |= (coremask << PCI_SBIM_SHIFT); 1108 w |= (coremask << PCI_SBIM_SHIFT);
1925 pci_write_config_dword(sii->pbus, PCI_INT_MASK, w); 1109 pci_write_config_dword(sii->pcibus, PCI_INT_MASK, w);
1926 } else {
1927 /* set sbintvec bit for our flag number */
1928 ai_setint(sih, siflag);
1929 } 1110 }
1930 1111
1931 if (PCI(sii)) { 1112 if (PCI(sih)) {
1932 pcicore_pci_setup(sii->pch, regs); 1113 pcicore_pci_setup(sii->pch);
1933
1934 /* switch back to previous core */
1935 ai_setcoreidx(sih, idx);
1936 } 1114 }
1937} 1115}
1938 1116
@@ -1942,25 +1120,11 @@ void ai_pci_setup(struct si_pub *sih, uint coremask)
1942 */ 1120 */
1943int ai_pci_fixcfg(struct si_pub *sih) 1121int ai_pci_fixcfg(struct si_pub *sih)
1944{ 1122{
1945 uint origidx;
1946 void __iomem *regs = NULL;
1947 struct si_info *sii = (struct si_info *)sih; 1123 struct si_info *sii = (struct si_info *)sih;
1948 1124
1949 /* Fixup PI in SROM shadow area to enable the correct PCI core access */ 1125 /* Fixup PI in SROM shadow area to enable the correct PCI core access */
1950 /* save the current index */
1951 origidx = ai_coreidx(&sii->pub);
1952
1953 /* check 'pi' is correct and fix it if not */ 1126 /* check 'pi' is correct and fix it if not */
1954 regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0); 1127 pcicore_fixcfg(sii->pch);
1955 if (sii->pub.buscoretype == PCIE_CORE_ID)
1956 pcicore_fixcfg_pcie(sii->pch,
1957 (struct sbpcieregs __iomem *)regs);
1958 else if (sii->pub.buscoretype == PCI_CORE_ID)
1959 pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs);
1960
1961 /* restore the original index */
1962 ai_setcoreidx(&sii->pub, origidx);
1963
1964 pcicore_hwup(sii->pch); 1128 pcicore_hwup(sii->pch);
1965 return 0; 1129 return 0;
1966} 1130}
@@ -1971,58 +1135,42 @@ u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
1971 uint regoff; 1135 uint regoff;
1972 1136
1973 regoff = offsetof(struct chipcregs, gpiocontrol); 1137 regoff = offsetof(struct chipcregs, gpiocontrol);
1974 return ai_corereg(sih, SI_CC_IDX, regoff, mask, val); 1138 return ai_cc_reg(sih, regoff, mask, val);
1975} 1139}
1976 1140
1977void ai_chipcontrl_epa4331(struct si_pub *sih, bool on) 1141void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
1978{ 1142{
1979 struct si_info *sii; 1143 struct bcma_device *cc;
1980 struct chipcregs __iomem *cc;
1981 uint origidx;
1982 u32 val; 1144 u32 val;
1983 1145
1984 sii = (struct si_info *)sih; 1146 cc = ai_findcore(sih, CC_CORE_ID, 0);
1985 origidx = ai_coreidx(sih);
1986
1987 cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
1988
1989 val = R_REG(&cc->chipcontrol);
1990 1147
1991 if (on) { 1148 if (on) {
1992 if (sih->chippkg == 9 || sih->chippkg == 0xb) 1149 if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb)
1993 /* Ext PA Controls for 4331 12x9 Package */ 1150 /* Ext PA Controls for 4331 12x9 Package */
1994 W_REG(&cc->chipcontrol, val | 1151 bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
1995 CCTRL4331_EXTPA_EN | 1152 CCTRL4331_EXTPA_EN |
1996 CCTRL4331_EXTPA_ON_GPIO2_5); 1153 CCTRL4331_EXTPA_ON_GPIO2_5);
1997 else 1154 else
1998 /* Ext PA Controls for 4331 12x12 Package */ 1155 /* Ext PA Controls for 4331 12x12 Package */
1999 W_REG(&cc->chipcontrol, 1156 bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
2000 val | CCTRL4331_EXTPA_EN); 1157 CCTRL4331_EXTPA_EN);
2001 } else { 1158 } else {
2002 val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5); 1159 val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
2003 W_REG(&cc->chipcontrol, val); 1160 bcma_mask32(cc, CHIPCREGOFFS(chipcontrol),
1161 ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5));
2004 } 1162 }
2005
2006 ai_setcoreidx(sih, origidx);
2007} 1163}
2008 1164
2009/* Enable BT-COEX & Ex-PA for 4313 */ 1165/* Enable BT-COEX & Ex-PA for 4313 */
2010void ai_epa_4313war(struct si_pub *sih) 1166void ai_epa_4313war(struct si_pub *sih)
2011{ 1167{
2012 struct si_info *sii; 1168 struct bcma_device *cc;
2013 struct chipcregs __iomem *cc;
2014 uint origidx;
2015 1169
2016 sii = (struct si_info *)sih; 1170 cc = ai_findcore(sih, CC_CORE_ID, 0);
2017 origidx = ai_coreidx(sih);
2018
2019 cc = ai_setcore(sih, CC_CORE_ID, 0);
2020 1171
2021 /* EPA Fix */ 1172 /* EPA Fix */
2022 W_REG(&cc->gpiocontrol, 1173 bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);
2023 R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
2024
2025 ai_setcoreidx(sih, origidx);
2026} 1174}
2027 1175
2028/* check if the device is removed */ 1176/* check if the device is removed */
@@ -2033,7 +1181,7 @@ bool ai_deviceremoved(struct si_pub *sih)
2033 1181
2034 sii = (struct si_info *)sih; 1182 sii = (struct si_info *)sih;
2035 1183
2036 pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w); 1184 pci_read_config_dword(sii->pcibus, PCI_VENDOR_ID, &w);
2037 if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM) 1185 if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
2038 return true; 1186 return true;
2039 1187
@@ -2042,26 +1190,23 @@ bool ai_deviceremoved(struct si_pub *sih)
2042 1190
2043bool ai_is_sprom_available(struct si_pub *sih) 1191bool ai_is_sprom_available(struct si_pub *sih)
2044{ 1192{
2045 if (sih->ccrev >= 31) { 1193 struct si_info *sii = (struct si_info *)sih;
2046 struct si_info *sii; 1194
2047 uint origidx; 1195 if (ai_get_ccrev(sih) >= 31) {
2048 struct chipcregs __iomem *cc; 1196 struct bcma_device *cc;
2049 u32 sromctrl; 1197 u32 sromctrl;
2050 1198
2051 if ((sih->cccaps & CC_CAP_SROM) == 0) 1199 if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0)
2052 return false; 1200 return false;
2053 1201
2054 sii = (struct si_info *)sih; 1202 cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
2055 origidx = sii->curidx; 1203 sromctrl = bcma_read32(cc, CHIPCREGOFFS(sromcontrol));
2056 cc = ai_setcoreidx(sih, SI_CC_IDX);
2057 sromctrl = R_REG(&cc->sromcontrol);
2058 ai_setcoreidx(sih, origidx);
2059 return sromctrl & SRC_PRESENT; 1204 return sromctrl & SRC_PRESENT;
2060 } 1205 }
2061 1206
2062 switch (sih->chip) { 1207 switch (ai_get_chip_id(sih)) {
2063 case BCM4313_CHIP_ID: 1208 case BCM4313_CHIP_ID:
2064 return (sih->chipst & CST4313_SPROM_PRESENT) != 0; 1209 return (sii->chipst & CST4313_SPROM_PRESENT) != 0;
2065 default: 1210 default:
2066 return true; 1211 return true;
2067 } 1212 }
@@ -2069,9 +1214,11 @@ bool ai_is_sprom_available(struct si_pub *sih)
2069 1214
2070bool ai_is_otp_disabled(struct si_pub *sih) 1215bool ai_is_otp_disabled(struct si_pub *sih)
2071{ 1216{
2072 switch (sih->chip) { 1217 struct si_info *sii = (struct si_info *)sih;
1218
1219 switch (ai_get_chip_id(sih)) {
2073 case BCM4313_CHIP_ID: 1220 case BCM4313_CHIP_ID:
2074 return (sih->chipst & CST4313_OTP_PRESENT) == 0; 1221 return (sii->chipst & CST4313_OTP_PRESENT) == 0;
2075 /* These chips always have their OTP on */ 1222 /* These chips always have their OTP on */
2076 case BCM43224_CHIP_ID: 1223 case BCM43224_CHIP_ID:
2077 case BCM43225_CHIP_ID: 1224 case BCM43225_CHIP_ID:
@@ -2079,3 +1226,15 @@ bool ai_is_otp_disabled(struct si_pub *sih)
2079 return false; 1226 return false;
2080 } 1227 }
2081} 1228}
1229
1230uint ai_get_buscoretype(struct si_pub *sih)
1231{
1232 struct si_info *sii = (struct si_info *)sih;
1233 return sii->buscore->id.id;
1234}
1235
1236uint ai_get_buscorerev(struct si_pub *sih)
1237{
1238 struct si_info *sii = (struct si_info *)sih;
1239 return sii->buscore->id.rev;
1240}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index b51d1e421e24..f84c6f781692 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -17,6 +17,8 @@
17#ifndef _BRCM_AIUTILS_H_ 17#ifndef _BRCM_AIUTILS_H_
18#define _BRCM_AIUTILS_H_ 18#define _BRCM_AIUTILS_H_
19 19
20#include <linux/bcma/bcma.h>
21
20#include "types.h" 22#include "types.h"
21 23
22/* 24/*
@@ -144,26 +146,15 @@
144 * public (read-only) portion of aiutils handle returned by si_attach() 146 * public (read-only) portion of aiutils handle returned by si_attach()
145 */ 147 */
146struct si_pub { 148struct si_pub {
147 uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
148 uint buscorerev; /* buscore rev */
149 uint buscoreidx; /* buscore index */
150 int ccrev; /* chip common core rev */ 149 int ccrev; /* chip common core rev */
151 u32 cccaps; /* chip common capabilities */ 150 u32 cccaps; /* chip common capabilities */
152 u32 cccaps_ext; /* chip common capabilities extension */
153 int pmurev; /* pmu core rev */ 151 int pmurev; /* pmu core rev */
154 u32 pmucaps; /* pmu capabilities */ 152 u32 pmucaps; /* pmu capabilities */
155 uint boardtype; /* board type */ 153 uint boardtype; /* board type */
156 uint boardvendor; /* board vendor */ 154 uint boardvendor; /* board vendor */
157 uint boardflags; /* board flags */
158 uint boardflags2; /* board flags2 */
159 uint chip; /* chip number */ 155 uint chip; /* chip number */
160 uint chiprev; /* chip revision */ 156 uint chiprev; /* chip revision */
161 uint chippkg; /* chip package option */ 157 uint chippkg; /* chip package option */
162 u32 chipst; /* chip status */
163 bool issim; /* chip is in simulation or emulation */
164 uint socirev; /* SOC interconnect rev */
165 bool pci_pr32414;
166
167}; 158};
168 159
169struct pci_dev; 160struct pci_dev;
@@ -179,38 +170,13 @@ struct gpioh_item {
179/* misc si info needed by some of the routines */ 170/* misc si info needed by some of the routines */
180struct si_info { 171struct si_info {
181 struct si_pub pub; /* back plane public state (must be first) */ 172 struct si_pub pub; /* back plane public state (must be first) */
182 struct pci_dev *pbus; /* handle to pci bus */ 173 struct bcma_bus *icbus; /* handle to soc interconnect bus */
183 uint dev_coreid; /* the core provides driver functions */ 174 struct pci_dev *pcibus; /* handle to pci bus */
184 void *intr_arg; /* interrupt callback function arg */
185 u32 (*intrsoff_fn) (void *intr_arg); /* turns chip interrupts off */
186 /* restore chip interrupts */
187 void (*intrsrestore_fn) (void *intr_arg, u32 arg);
188 /* check if interrupts are enabled */
189 bool (*intrsenabled_fn) (void *intr_arg);
190
191 struct pcicore_info *pch; /* PCI/E core handle */ 175 struct pcicore_info *pch; /* PCI/E core handle */
192 176 struct bcma_device *buscore;
193 struct list_head var_list; /* list of srom variables */ 177 struct list_head var_list; /* list of srom variables */
194 178
195 void __iomem *curmap; /* current regs va */ 179 u32 chipst; /* chip status */
196 void __iomem *regs[SI_MAXCORES]; /* other regs va */
197
198 uint curidx; /* current core index */
199 uint numcores; /* # discovered cores */
200 uint coreid[SI_MAXCORES]; /* id of each core */
201 u32 coresba[SI_MAXCORES]; /* backplane address of each core */
202 void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */
203 u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */
204 u32 coresba_size[SI_MAXCORES]; /* backplane address space size */
205 u32 coresba2_size[SI_MAXCORES]; /* second address space size */
206
207 void *curwrap; /* current wrapper va */
208 void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
209 u32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
210
211 u32 cia[SI_MAXCORES]; /* erom cia entry for each core */
212 u32 cib[SI_MAXCORES]; /* erom cia entry for each core */
213 u32 oob_router; /* oob router registers for axi */
214}; 180};
215 181
216/* 182/*
@@ -223,52 +189,15 @@ struct si_info {
223 189
224 190
225/* AMBA Interconnect exported externs */ 191/* AMBA Interconnect exported externs */
226extern uint ai_flag(struct si_pub *sih); 192extern struct bcma_device *ai_findcore(struct si_pub *sih,
227extern void ai_setint(struct si_pub *sih, int siflag); 193 u16 coreid, u16 coreunit);
228extern uint ai_coreidx(struct si_pub *sih); 194extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
229extern uint ai_corevendor(struct si_pub *sih);
230extern uint ai_corerev(struct si_pub *sih);
231extern bool ai_iscoreup(struct si_pub *sih);
232extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
233extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val);
234extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
235extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
236 uint val);
237extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
238extern void ai_core_disable(struct si_pub *sih, u32 bits);
239extern int ai_numaddrspaces(struct si_pub *sih);
240extern u32 ai_addrspace(struct si_pub *sih, uint asidx);
241extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx);
242extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val);
243 195
244/* === exported functions === */ 196/* === exported functions === */
245extern struct si_pub *ai_attach(void __iomem *regs, struct pci_dev *sdh); 197extern struct si_pub *ai_attach(struct bcma_bus *pbus);
246extern void ai_detach(struct si_pub *sih); 198extern void ai_detach(struct si_pub *sih);
247extern uint ai_coreid(struct si_pub *sih); 199extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
248extern uint ai_corerev(struct si_pub *sih);
249extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
250 uint val);
251extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val);
252extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
253extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
254extern bool ai_iscoreup(struct si_pub *sih);
255extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
256extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx);
257extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
258extern void __iomem *ai_switch_core(struct si_pub *sih, uint coreid,
259 uint *origidx, uint *intr_val);
260extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val);
261extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
262extern void ai_core_disable(struct si_pub *sih, u32 bits);
263extern u32 ai_alp_clock(struct si_pub *sih);
264extern u32 ai_ilp_clock(struct si_pub *sih);
265extern void ai_pci_setup(struct si_pub *sih, uint coremask); 200extern void ai_pci_setup(struct si_pub *sih, uint coremask);
266extern void ai_setint(struct si_pub *sih, int siflag);
267extern bool ai_backplane64(struct si_pub *sih);
268extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
269 void *intrsrestore_fn,
270 void *intrsenabled_fn, void *intr_arg);
271extern void ai_deregister_intr_callback(struct si_pub *sih);
272extern void ai_clkctl_init(struct si_pub *sih); 201extern void ai_clkctl_init(struct si_pub *sih);
273extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih); 202extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
274extern bool ai_clkctl_cc(struct si_pub *sih, uint mode); 203extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
@@ -283,13 +212,6 @@ extern bool ai_is_otp_disabled(struct si_pub *sih);
283/* SPROM availability */ 212/* SPROM availability */
284extern bool ai_is_sprom_available(struct si_pub *sih); 213extern bool ai_is_sprom_available(struct si_pub *sih);
285 214
286/*
287 * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
288 * The returned path is NULL terminated and has trailing '/'.
289 * Return 0 on success, nonzero otherwise.
290 */
291extern int ai_devpath(struct si_pub *sih, char *path, int size);
292
293extern void ai_pci_sleep(struct si_pub *sih); 215extern void ai_pci_sleep(struct si_pub *sih);
294extern void ai_pci_down(struct si_pub *sih); 216extern void ai_pci_down(struct si_pub *sih);
295extern void ai_pci_up(struct si_pub *sih); 217extern void ai_pci_up(struct si_pub *sih);
@@ -299,4 +221,52 @@ extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
299/* Enable Ex-PA for 4313 */ 221/* Enable Ex-PA for 4313 */
300extern void ai_epa_4313war(struct si_pub *sih); 222extern void ai_epa_4313war(struct si_pub *sih);
301 223
224extern uint ai_get_buscoretype(struct si_pub *sih);
225extern uint ai_get_buscorerev(struct si_pub *sih);
226
227static inline int ai_get_ccrev(struct si_pub *sih)
228{
229 return sih->ccrev;
230}
231
232static inline u32 ai_get_cccaps(struct si_pub *sih)
233{
234 return sih->cccaps;
235}
236
237static inline int ai_get_pmurev(struct si_pub *sih)
238{
239 return sih->pmurev;
240}
241
242static inline u32 ai_get_pmucaps(struct si_pub *sih)
243{
244 return sih->pmucaps;
245}
246
247static inline uint ai_get_boardtype(struct si_pub *sih)
248{
249 return sih->boardtype;
250}
251
252static inline uint ai_get_boardvendor(struct si_pub *sih)
253{
254 return sih->boardvendor;
255}
256
257static inline uint ai_get_chip_id(struct si_pub *sih)
258{
259 return sih->chip;
260}
261
262static inline uint ai_get_chiprev(struct si_pub *sih)
263{
264 return sih->chiprev;
265}
266
267static inline uint ai_get_chippkg(struct si_pub *sih)
268{
269 return sih->chippkg;
270}
271
302#endif /* _BRCM_AIUTILS_H_ */ 272#endif /* _BRCM_AIUTILS_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 43f7a724dda8..90911eec0cf5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -1118,14 +1118,17 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
1118 u8 status_delay = 0; 1118 u8 status_delay = 0;
1119 1119
1120 /* wait till the next 8 bytes of txstatus is available */ 1120 /* wait till the next 8 bytes of txstatus is available */
1121 while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) { 1121 s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
1122 while ((s1 & TXS_V) == 0) {
1122 udelay(1); 1123 udelay(1);
1123 status_delay++; 1124 status_delay++;
1124 if (status_delay > 10) 1125 if (status_delay > 10)
1125 return; /* error condition */ 1126 return; /* error condition */
1127 s1 = bcma_read32(wlc->hw->d11core,
1128 D11REGOFFS(frmtxstatus));
1126 } 1129 }
1127 1130
1128 s2 = R_REG(&wlc->regs->frmtxstatus2); 1131 s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
1129 } 1132 }
1130 1133
1131 if (scb) { 1134 if (scb) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
index ed51616abc85..1948cb2771e9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -430,6 +430,9 @@ struct d11regs {
430 u16 PAD[0x380]; /* 0x800 - 0xEFE */ 430 u16 PAD[0x380]; /* 0x800 - 0xEFE */
431}; 431};
432 432
433/* d11 register field offset */
434#define D11REGOFFS(field) offsetof(struct d11regs, field)
435
433#define PIHR_BASE 0x0400 /* byte address of packed IHR region */ 436#define PIHR_BASE 0x0400 /* byte address of packed IHR region */
434 437
435/* biststatus */ 438/* biststatus */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 0bb8c37e979e..b4cf617276c9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -27,6 +27,13 @@
27#include "soc.h" 27#include "soc.h"
28 28
29/* 29/*
30 * dma register field offset calculation
31 */
32#define DMA64REGOFFS(field) offsetof(struct dma64regs, field)
33#define DMA64TXREGOFFS(di, field) (di->d64txregbase + DMA64REGOFFS(field))
34#define DMA64RXREGOFFS(di, field) (di->d64rxregbase + DMA64REGOFFS(field))
35
36/*
30 * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within 37 * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
31 * a contiguous 8kB physical address. 38 * a contiguous 8kB physical address.
32 */ 39 */
@@ -220,15 +227,16 @@ struct dma_info {
220 uint *msg_level; /* message level pointer */ 227 uint *msg_level; /* message level pointer */
221 char name[MAXNAMEL]; /* callers name for diag msgs */ 228 char name[MAXNAMEL]; /* callers name for diag msgs */
222 229
223 struct pci_dev *pbus; /* bus handle */ 230 struct bcma_device *core;
231 struct device *dmadev;
224 232
225 bool dma64; /* this dma engine is operating in 64-bit mode */ 233 bool dma64; /* this dma engine is operating in 64-bit mode */
226 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */ 234 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
227 235
228 /* 64-bit dma tx engine registers */ 236 /* 64-bit dma tx engine registers */
229 struct dma64regs __iomem *d64txregs; 237 uint d64txregbase;
230 /* 64-bit dma rx engine registers */ 238 /* 64-bit dma rx engine registers */
231 struct dma64regs __iomem *d64rxregs; 239 uint d64rxregbase;
232 /* pointer to dma64 tx descriptor ring */ 240 /* pointer to dma64 tx descriptor ring */
233 struct dma64desc *txd64; 241 struct dma64desc *txd64;
234 /* pointer to dma64 rx descriptor ring */ 242 /* pointer to dma64 rx descriptor ring */
@@ -375,15 +383,16 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
375 if (dmactrlflags & DMA_CTRL_PEN) { 383 if (dmactrlflags & DMA_CTRL_PEN) {
376 u32 control; 384 u32 control;
377 385
378 control = R_REG(&di->d64txregs->control); 386 control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
379 W_REG(&di->d64txregs->control, 387 bcma_write32(di->core, DMA64TXREGOFFS(di, control),
380 control | D64_XC_PD); 388 control | D64_XC_PD);
381 if (R_REG(&di->d64txregs->control) & D64_XC_PD) 389 if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
390 D64_XC_PD)
382 /* We *can* disable it so it is supported, 391 /* We *can* disable it so it is supported,
383 * restore control register 392 * restore control register
384 */ 393 */
385 W_REG(&di->d64txregs->control, 394 bcma_write32(di->core, DMA64TXREGOFFS(di, control),
386 control); 395 control);
387 else 396 else
388 /* Not supported, don't allow it to be enabled */ 397 /* Not supported, don't allow it to be enabled */
389 dmactrlflags &= ~DMA_CTRL_PEN; 398 dmactrlflags &= ~DMA_CTRL_PEN;
@@ -394,12 +403,12 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
394 return dmactrlflags; 403 return dmactrlflags;
395} 404}
396 405
397static bool _dma64_addrext(struct dma64regs __iomem *dma64regs) 406static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
398{ 407{
399 u32 w; 408 u32 w;
400 OR_REG(&dma64regs->control, D64_XC_AE); 409 bcma_set32(di->core, ctrl_offset, D64_XC_AE);
401 w = R_REG(&dma64regs->control); 410 w = bcma_read32(di->core, ctrl_offset);
402 AND_REG(&dma64regs->control, ~D64_XC_AE); 411 bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
403 return (w & D64_XC_AE) == D64_XC_AE; 412 return (w & D64_XC_AE) == D64_XC_AE;
404} 413}
405 414
@@ -412,13 +421,13 @@ static bool _dma_isaddrext(struct dma_info *di)
412 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */ 421 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
413 422
414 /* not all tx or rx channel are available */ 423 /* not all tx or rx channel are available */
415 if (di->d64txregs != NULL) { 424 if (di->d64txregbase != 0) {
416 if (!_dma64_addrext(di->d64txregs)) 425 if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
417 DMA_ERROR("%s: DMA64 tx doesn't have AE set\n", 426 DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
418 di->name); 427 di->name);
419 return true; 428 return true;
420 } else if (di->d64rxregs != NULL) { 429 } else if (di->d64rxregbase != 0) {
421 if (!_dma64_addrext(di->d64rxregs)) 430 if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
422 DMA_ERROR("%s: DMA64 rx doesn't have AE set\n", 431 DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
423 di->name); 432 di->name);
424 return true; 433 return true;
@@ -432,14 +441,14 @@ static bool _dma_descriptor_align(struct dma_info *di)
432 u32 addrl; 441 u32 addrl;
433 442
434 /* Check to see if the descriptors need to be aligned on 4K/8K or not */ 443 /* Check to see if the descriptors need to be aligned on 4K/8K or not */
435 if (di->d64txregs != NULL) { 444 if (di->d64txregbase != 0) {
436 W_REG(&di->d64txregs->addrlow, 0xff0); 445 bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
437 addrl = R_REG(&di->d64txregs->addrlow); 446 addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
438 if (addrl != 0) 447 if (addrl != 0)
439 return false; 448 return false;
440 } else if (di->d64rxregs != NULL) { 449 } else if (di->d64rxregbase != 0) {
441 W_REG(&di->d64rxregs->addrlow, 0xff0); 450 bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
442 addrl = R_REG(&di->d64rxregs->addrlow); 451 addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
443 if (addrl != 0) 452 if (addrl != 0)
444 return false; 453 return false;
445 } 454 }
@@ -450,7 +459,7 @@ static bool _dma_descriptor_align(struct dma_info *di)
450 * Descriptor table must start at the DMA hardware dictated alignment, so 459 * Descriptor table must start at the DMA hardware dictated alignment, so
451 * allocated memory must be large enough to support this requirement. 460 * allocated memory must be large enough to support this requirement.
452 */ 461 */
453static void *dma_alloc_consistent(struct pci_dev *pdev, uint size, 462static void *dma_alloc_consistent(struct dma_info *di, uint size,
454 u16 align_bits, uint *alloced, 463 u16 align_bits, uint *alloced,
455 dma_addr_t *pap) 464 dma_addr_t *pap)
456{ 465{
@@ -460,7 +469,7 @@ static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
460 size += align; 469 size += align;
461 *alloced = size; 470 *alloced = size;
462 } 471 }
463 return pci_alloc_consistent(pdev, size, pap); 472 return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
464} 473}
465 474
466static 475static
@@ -486,7 +495,7 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
486 u32 desc_strtaddr; 495 u32 desc_strtaddr;
487 u32 alignbytes = 1 << *alignbits; 496 u32 alignbytes = 1 << *alignbits;
488 497
489 va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa); 498 va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);
490 499
491 if (NULL == va) 500 if (NULL == va)
492 return NULL; 501 return NULL;
@@ -495,8 +504,8 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
495 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr 504 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
496 & boundary)) { 505 & boundary)) {
497 *alignbits = dma_align_sizetobits(size); 506 *alignbits = dma_align_sizetobits(size);
498 pci_free_consistent(di->pbus, size, va, *descpa); 507 dma_free_coherent(di->dmadev, size, va, *descpa);
499 va = dma_alloc_consistent(di->pbus, size, *alignbits, 508 va = dma_alloc_consistent(di, size, *alignbits,
500 alloced, descpa); 509 alloced, descpa);
501 } 510 }
502 return va; 511 return va;
@@ -556,12 +565,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
556} 565}
557 566
558struct dma_pub *dma_attach(char *name, struct si_pub *sih, 567struct dma_pub *dma_attach(char *name, struct si_pub *sih,
559 void __iomem *dmaregstx, void __iomem *dmaregsrx, 568 struct bcma_device *core,
560 uint ntxd, uint nrxd, 569 uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
561 uint rxbufsize, int rxextheadroom, 570 uint rxbufsize, int rxextheadroom,
562 uint nrxpost, uint rxoffset, uint *msg_level) 571 uint nrxpost, uint rxoffset, uint *msg_level)
563{ 572{
564 struct dma_info *di; 573 struct dma_info *di;
574 u8 rev = core->id.rev;
565 uint size; 575 uint size;
566 576
567 /* allocate private info structure */ 577 /* allocate private info structure */
@@ -572,11 +582,13 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
572 di->msg_level = msg_level ? msg_level : &dma_msg_level; 582 di->msg_level = msg_level ? msg_level : &dma_msg_level;
573 583
574 584
575 di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64); 585 di->dma64 =
586 ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
576 587
577 /* init dma reg pointer */ 588 /* init dma reg info */
578 di->d64txregs = (struct dma64regs __iomem *) dmaregstx; 589 di->core = core;
579 di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx; 590 di->d64txregbase = txregbase;
591 di->d64rxregbase = rxregbase;
580 592
581 /* 593 /*
582 * Default flags (which can be changed by the driver calling 594 * Default flags (which can be changed by the driver calling
@@ -585,16 +597,17 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
585 */ 597 */
586 _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0); 598 _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
587 599
588 DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", 600 DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
589 name, "DMA64", 601 "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
602 "txregbase %u rxregbase %u\n", name, "DMA64",
590 di->dma.dmactrlflags, ntxd, nrxd, rxbufsize, 603 di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
591 rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx); 604 rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
592 605
593 /* make a private copy of our callers name */ 606 /* make a private copy of our callers name */
594 strncpy(di->name, name, MAXNAMEL); 607 strncpy(di->name, name, MAXNAMEL);
595 di->name[MAXNAMEL - 1] = '\0'; 608 di->name[MAXNAMEL - 1] = '\0';
596 609
597 di->pbus = ((struct si_info *)sih)->pbus; 610 di->dmadev = core->dma_dev;
598 611
599 /* save tunables */ 612 /* save tunables */
600 di->ntxd = (u16) ntxd; 613 di->ntxd = (u16) ntxd;
@@ -626,11 +639,11 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
626 di->dataoffsetlow = di->ddoffsetlow; 639 di->dataoffsetlow = di->ddoffsetlow;
627 di->dataoffsethigh = di->ddoffsethigh; 640 di->dataoffsethigh = di->ddoffsethigh;
628 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */ 641 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
629 if ((ai_coreid(sih) == SDIOD_CORE_ID) 642 if ((core->id.id == SDIOD_CORE_ID)
630 && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2))) 643 && ((rev > 0) && (rev <= 2)))
631 di->addrext = 0; 644 di->addrext = 0;
632 else if ((ai_coreid(sih) == I2S_CORE_ID) && 645 else if ((core->id.id == I2S_CORE_ID) &&
633 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1))) 646 ((rev == 0) || (rev == 1)))
634 di->addrext = 0; 647 di->addrext = 0;
635 else 648 else
636 di->addrext = _dma_isaddrext(di); 649 di->addrext = _dma_isaddrext(di);
@@ -749,13 +762,13 @@ void dma_detach(struct dma_pub *pub)
749 762
750 /* free dma descriptor rings */ 763 /* free dma descriptor rings */
751 if (di->txd64) 764 if (di->txd64)
752 pci_free_consistent(di->pbus, di->txdalloc, 765 dma_free_coherent(di->dmadev, di->txdalloc,
753 ((s8 *)di->txd64 - di->txdalign), 766 ((s8 *)di->txd64 - di->txdalign),
754 (di->txdpaorig)); 767 (di->txdpaorig));
755 if (di->rxd64) 768 if (di->rxd64)
756 pci_free_consistent(di->pbus, di->rxdalloc, 769 dma_free_coherent(di->dmadev, di->rxdalloc,
757 ((s8 *)di->rxd64 - di->rxdalign), 770 ((s8 *)di->rxd64 - di->rxdalign),
758 (di->rxdpaorig)); 771 (di->rxdpaorig));
759 772
760 /* free packet pointer vectors */ 773 /* free packet pointer vectors */
761 kfree(di->txp); 774 kfree(di->txp);
@@ -780,11 +793,15 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
780 if ((di->ddoffsetlow == 0) 793 if ((di->ddoffsetlow == 0)
781 || !(pa & PCI32ADDR_HIGH)) { 794 || !(pa & PCI32ADDR_HIGH)) {
782 if (direction == DMA_TX) { 795 if (direction == DMA_TX) {
783 W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); 796 bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
784 W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); 797 pa + di->ddoffsetlow);
798 bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
799 di->ddoffsethigh);
785 } else { 800 } else {
786 W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); 801 bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
787 W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); 802 pa + di->ddoffsetlow);
803 bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
804 di->ddoffsethigh);
788 } 805 }
789 } else { 806 } else {
790 /* DMA64 32bits address extension */ 807 /* DMA64 32bits address extension */
@@ -795,15 +812,19 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
795 pa &= ~PCI32ADDR_HIGH; 812 pa &= ~PCI32ADDR_HIGH;
796 813
797 if (direction == DMA_TX) { 814 if (direction == DMA_TX) {
798 W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); 815 bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
799 W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); 816 pa + di->ddoffsetlow);
800 SET_REG(&di->d64txregs->control, 817 bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
801 D64_XC_AE, (ae << D64_XC_AE_SHIFT)); 818 di->ddoffsethigh);
819 bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
820 D64_XC_AE, (ae << D64_XC_AE_SHIFT));
802 } else { 821 } else {
803 W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); 822 bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
804 W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); 823 pa + di->ddoffsetlow);
805 SET_REG(&di->d64rxregs->control, 824 bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
806 D64_RC_AE, (ae << D64_RC_AE_SHIFT)); 825 di->ddoffsethigh);
826 bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
827 D64_RC_AE, (ae << D64_RC_AE_SHIFT));
807 } 828 }
808 } 829 }
809} 830}
@@ -815,9 +836,9 @@ static void _dma_rxenable(struct dma_info *di)
815 836
816 DMA_TRACE("%s:\n", di->name); 837 DMA_TRACE("%s:\n", di->name);
817 838
818 control = 839 control = D64_RC_RE | (bcma_read32(di->core,
819 (R_REG(&di->d64rxregs->control) & D64_RC_AE) | 840 DMA64RXREGOFFS(di, control)) &
820 D64_RC_RE; 841 D64_RC_AE);
821 842
822 if ((dmactrlflags & DMA_CTRL_PEN) == 0) 843 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
823 control |= D64_RC_PD; 844 control |= D64_RC_PD;
@@ -825,7 +846,7 @@ static void _dma_rxenable(struct dma_info *di)
825 if (dmactrlflags & DMA_CTRL_ROC) 846 if (dmactrlflags & DMA_CTRL_ROC)
826 control |= D64_RC_OC; 847 control |= D64_RC_OC;
827 848
828 W_REG(&di->d64rxregs->control, 849 bcma_write32(di->core, DMA64RXREGOFFS(di, control),
829 ((di->rxoffset << D64_RC_RO_SHIFT) | control)); 850 ((di->rxoffset << D64_RC_RO_SHIFT) | control));
830} 851}
831 852
@@ -868,7 +889,8 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
868 return NULL; 889 return NULL;
869 890
870 curr = 891 curr =
871 B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) - 892 B2I(((bcma_read32(di->core,
893 DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
872 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc); 894 di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
873 895
874 /* ignore curr if forceall */ 896 /* ignore curr if forceall */
@@ -882,7 +904,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
882 pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow; 904 pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
883 905
884 /* clear this packet from the descriptor ring */ 906 /* clear this packet from the descriptor ring */
885 pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE); 907 dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);
886 908
887 di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef); 909 di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
888 di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef); 910 di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
@@ -950,12 +972,12 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
950 if (resid > 0) { 972 if (resid > 0) {
951 uint cur; 973 uint cur;
952 cur = 974 cur =
953 B2I(((R_REG(&di->d64rxregs->status0) & 975 B2I(((bcma_read32(di->core,
954 D64_RS0_CD_MASK) - 976 DMA64RXREGOFFS(di, status0)) &
955 di->rcvptrbase) & D64_RS0_CD_MASK, 977 D64_RS0_CD_MASK) - di->rcvptrbase) &
956 struct dma64desc); 978 D64_RS0_CD_MASK, struct dma64desc);
957 DMA_ERROR("rxin %d rxout %d, hw_curr %d\n", 979 DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
958 di->rxin, di->rxout, cur); 980 di->rxin, di->rxout, cur);
959 } 981 }
960#endif /* BCMDBG */ 982#endif /* BCMDBG */
961 983
@@ -983,8 +1005,10 @@ static bool dma64_rxidle(struct dma_info *di)
983 if (di->nrxd == 0) 1005 if (di->nrxd == 0)
984 return true; 1006 return true;
985 1007
986 return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) == 1008 return ((bcma_read32(di->core,
987 (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK)); 1009 DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
1010 (bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
1011 D64_RS0_CD_MASK));
988} 1012}
989 1013
990/* 1014/*
@@ -1048,8 +1072,8 @@ bool dma_rxfill(struct dma_pub *pub)
1048 */ 1072 */
1049 *(u32 *) (p->data) = 0; 1073 *(u32 *) (p->data) = 0;
1050 1074
1051 pa = pci_map_single(di->pbus, p->data, 1075 pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
1052 di->rxbufsize, PCI_DMA_FROMDEVICE); 1076 DMA_FROM_DEVICE);
1053 1077
1054 /* save the free packet pointer */ 1078 /* save the free packet pointer */
1055 di->rxp[rxout] = p; 1079 di->rxp[rxout] = p;
@@ -1067,7 +1091,7 @@ bool dma_rxfill(struct dma_pub *pub)
1067 di->rxout = rxout; 1091 di->rxout = rxout;
1068 1092
1069 /* update the chip lastdscr pointer */ 1093 /* update the chip lastdscr pointer */
1070 W_REG(&di->d64rxregs->ptr, 1094 bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
1071 di->rcvptrbase + I2B(rxout, struct dma64desc)); 1095 di->rcvptrbase + I2B(rxout, struct dma64desc));
1072 1096
1073 return ring_empty; 1097 return ring_empty;
@@ -1128,7 +1152,7 @@ void dma_txinit(struct dma_pub *pub)
1128 1152
1129 if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0) 1153 if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
1130 control |= D64_XC_PD; 1154 control |= D64_XC_PD;
1131 OR_REG(&di->d64txregs->control, control); 1155 bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);
1132 1156
1133 /* DMA engine with alignment requirement requires table to be inited 1157 /* DMA engine with alignment requirement requires table to be inited
1134 * before enabling the engine 1158 * before enabling the engine
@@ -1146,7 +1170,7 @@ void dma_txsuspend(struct dma_pub *pub)
1146 if (di->ntxd == 0) 1170 if (di->ntxd == 0)
1147 return; 1171 return;
1148 1172
1149 OR_REG(&di->d64txregs->control, D64_XC_SE); 1173 bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
1150} 1174}
1151 1175
1152void dma_txresume(struct dma_pub *pub) 1176void dma_txresume(struct dma_pub *pub)
@@ -1158,7 +1182,7 @@ void dma_txresume(struct dma_pub *pub)
1158 if (di->ntxd == 0) 1182 if (di->ntxd == 0)
1159 return; 1183 return;
1160 1184
1161 AND_REG(&di->d64txregs->control, ~D64_XC_SE); 1185 bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
1162} 1186}
1163 1187
1164bool dma_txsuspended(struct dma_pub *pub) 1188bool dma_txsuspended(struct dma_pub *pub)
@@ -1166,8 +1190,9 @@ bool dma_txsuspended(struct dma_pub *pub)
1166 struct dma_info *di = (struct dma_info *)pub; 1190 struct dma_info *di = (struct dma_info *)pub;
1167 1191
1168 return (di->ntxd == 0) || 1192 return (di->ntxd == 0) ||
1169 ((R_REG(&di->d64txregs->control) & D64_XC_SE) == 1193 ((bcma_read32(di->core,
1170 D64_XC_SE); 1194 DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
1195 D64_XC_SE);
1171} 1196}
1172 1197
1173void dma_txreclaim(struct dma_pub *pub, enum txd_range range) 1198void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
@@ -1200,16 +1225,17 @@ bool dma_txreset(struct dma_pub *pub)
1200 return true; 1225 return true;
1201 1226
1202 /* suspend tx DMA first */ 1227 /* suspend tx DMA first */
1203 W_REG(&di->d64txregs->control, D64_XC_SE); 1228 bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
1204 SPINWAIT(((status = 1229 SPINWAIT(((status =
1205 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) 1230 (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
1206 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE) 1231 D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
1207 && (status != D64_XS0_XS_STOPPED), 10000); 1232 (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
1233 10000);
1208 1234
1209 W_REG(&di->d64txregs->control, 0); 1235 bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
1210 SPINWAIT(((status = 1236 SPINWAIT(((status =
1211 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) 1237 (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
1212 != D64_XS0_XS_DISABLED), 10000); 1238 D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);
1213 1239
1214 /* wait for the last transaction to complete */ 1240 /* wait for the last transaction to complete */
1215 udelay(300); 1241 udelay(300);
@@ -1225,10 +1251,10 @@ bool dma_rxreset(struct dma_pub *pub)
1225 if (di->nrxd == 0) 1251 if (di->nrxd == 0)
1226 return true; 1252 return true;
1227 1253
1228 W_REG(&di->d64rxregs->control, 0); 1254 bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
1229 SPINWAIT(((status = 1255 SPINWAIT(((status =
1230 (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) 1256 (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
1231 != D64_RS0_RS_DISABLED), 10000); 1257 D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);
1232 1258
1233 return status == D64_RS0_RS_DISABLED; 1259 return status == D64_RS0_RS_DISABLED;
1234} 1260}
@@ -1239,10 +1265,9 @@ bool dma_rxreset(struct dma_pub *pub)
1239 * the error(toss frames) could be fatal and cause many subsequent hard 1265 * the error(toss frames) could be fatal and cause many subsequent hard
1240 * to debug problems 1266 * to debug problems
1241 */ 1267 */
1242int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit) 1268int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
1243{ 1269{
1244 struct dma_info *di = (struct dma_info *)pub; 1270 struct dma_info *di = (struct dma_info *)pub;
1245 struct sk_buff *p, *next;
1246 unsigned char *data; 1271 unsigned char *data;
1247 uint len; 1272 uint len;
1248 u16 txout; 1273 u16 txout;
@@ -1254,57 +1279,44 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
1254 txout = di->txout; 1279 txout = di->txout;
1255 1280
1256 /* 1281 /*
1257 * Walk the chain of packet buffers 1282 * obtain and initialize transmit descriptor entry.
1258 * allocating and initializing transmit descriptor entries.
1259 */ 1283 */
1260 for (p = p0; p; p = next) { 1284 data = p->data;
1261 data = p->data; 1285 len = p->len;
1262 len = p->len;
1263 next = p->next;
1264
1265 /* return nonzero if out of tx descriptors */
1266 if (nexttxd(di, txout) == di->txin)
1267 goto outoftxd;
1268
1269 if (len == 0)
1270 continue;
1271 1286
1272 /* get physical address of buffer start */ 1287 /* no use to transmit a zero length packet */
1273 pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE); 1288 if (len == 0)
1289 return 0;
1274 1290
1275 flags = 0; 1291 /* return nonzero if out of tx descriptors */
1276 if (p == p0) 1292 if (nexttxd(di, txout) == di->txin)
1277 flags |= D64_CTRL1_SOF; 1293 goto outoftxd;
1278 1294
1279 /* With a DMA segment list, Descriptor table is filled 1295 /* get physical address of buffer start */
1280 * using the segment list instead of looping over 1296 pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
1281 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
1282 * is when end of segment list is reached.
1283 */
1284 if (next == NULL)
1285 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
1286 if (txout == (di->ntxd - 1))
1287 flags |= D64_CTRL1_EOT;
1288 1297
1289 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len); 1298 /* With a DMA segment list, Descriptor table is filled
1299 * using the segment list instead of looping over
1300 * buffers in multi-chain DMA. Therefore, EOF for SGLIST
1301 * is when end of segment list is reached.
1302 */
1303 flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
1304 if (txout == (di->ntxd - 1))
1305 flags |= D64_CTRL1_EOT;
1290 1306
1291 txout = nexttxd(di, txout); 1307 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
1292 }
1293 1308
1294 /* if last txd eof not set, fix it */ 1309 txout = nexttxd(di, txout);
1295 if (!(flags & D64_CTRL1_EOF))
1296 di->txd64[prevtxd(di, txout)].ctrl1 =
1297 cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
1298 1310
1299 /* save the packet */ 1311 /* save the packet */
1300 di->txp[prevtxd(di, txout)] = p0; 1312 di->txp[prevtxd(di, txout)] = p;
1301 1313
1302 /* bump the tx descriptor index */ 1314 /* bump the tx descriptor index */
1303 di->txout = txout; 1315 di->txout = txout;
1304 1316
1305 /* kick the chip */ 1317 /* kick the chip */
1306 if (commit) 1318 if (commit)
1307 W_REG(&di->d64txregs->ptr, 1319 bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
1308 di->xmtptrbase + I2B(txout, struct dma64desc)); 1320 di->xmtptrbase + I2B(txout, struct dma64desc));
1309 1321
1310 /* tx flow control */ 1322 /* tx flow control */
@@ -1314,7 +1326,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
1314 1326
1315 outoftxd: 1327 outoftxd:
1316 DMA_ERROR("%s: out of txds !!!\n", di->name); 1328 DMA_ERROR("%s: out of txds !!!\n", di->name);
1317 brcmu_pkt_buf_free_skb(p0); 1329 brcmu_pkt_buf_free_skb(p);
1318 di->dma.txavail = 0; 1330 di->dma.txavail = 0;
1319 di->dma.txnobuf++; 1331 di->dma.txnobuf++;
1320 return -1; 1332 return -1;
@@ -1352,16 +1364,15 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
1352 if (range == DMA_RANGE_ALL) 1364 if (range == DMA_RANGE_ALL)
1353 end = di->txout; 1365 end = di->txout;
1354 else { 1366 else {
1355 struct dma64regs __iomem *dregs = di->d64txregs; 1367 end = (u16) (B2I(((bcma_read32(di->core,
1356 1368 DMA64TXREGOFFS(di, status0)) &
1357 end = (u16) (B2I(((R_REG(&dregs->status0) & 1369 D64_XS0_CD_MASK) - di->xmtptrbase) &
1358 D64_XS0_CD_MASK) - 1370 D64_XS0_CD_MASK, struct dma64desc));
1359 di->xmtptrbase) & D64_XS0_CD_MASK,
1360 struct dma64desc));
1361 1371
1362 if (range == DMA_RANGE_TRANSFERED) { 1372 if (range == DMA_RANGE_TRANSFERED) {
1363 active_desc = 1373 active_desc =
1364 (u16) (R_REG(&dregs->status1) & 1374 (u16)(bcma_read32(di->core,
1375 DMA64TXREGOFFS(di, status1)) &
1365 D64_XS1_AD_MASK); 1376 D64_XS1_AD_MASK);
1366 active_desc = 1377 active_desc =
1367 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK; 1378 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
@@ -1390,7 +1401,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
1390 txp = di->txp[i]; 1401 txp = di->txp[i];
1391 di->txp[i] = NULL; 1402 di->txp[i] = NULL;
1392 1403
1393 pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE); 1404 dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
1394 } 1405 }
1395 1406
1396 di->txin = i; 1407 di->txin = i;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
index d317c7c12f91..cc269ee5c499 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
@@ -75,10 +75,11 @@ struct dma_pub {
75}; 75};
76 76
77extern struct dma_pub *dma_attach(char *name, struct si_pub *sih, 77extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
78 void __iomem *dmaregstx, void __iomem *dmaregsrx, 78 struct bcma_device *d11core,
79 uint ntxd, uint nrxd, 79 uint txregbase, uint rxregbase,
80 uint rxbufsize, int rxextheadroom, 80 uint ntxd, uint nrxd,
81 uint nrxpost, uint rxoffset, uint *msg_level); 81 uint rxbufsize, int rxextheadroom,
82 uint nrxpost, uint rxoffset, uint *msg_level);
82 83
83void dma_rxinit(struct dma_pub *pub); 84void dma_rxinit(struct dma_pub *pub);
84int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list); 85int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 6d3c7b6c5aa0..77fdc45b43ef 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -17,10 +17,11 @@
17#define __UNDEF_NO_VERSION__ 17#define __UNDEF_NO_VERSION__
18 18
19#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
20#include <linux/pci.h>
21#include <linux/sched.h> 20#include <linux/sched.h>
22#include <linux/firmware.h> 21#include <linux/firmware.h>
23#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/module.h>
24#include <linux/bcma/bcma.h>
24#include <net/mac80211.h> 25#include <net/mac80211.h>
25#include <defs.h> 26#include <defs.h>
26#include "nicpci.h" 27#include "nicpci.h"
@@ -39,10 +40,10 @@
39#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \ 40#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
40 FIF_ALLMULTI | \ 41 FIF_ALLMULTI | \
41 FIF_FCSFAIL | \ 42 FIF_FCSFAIL | \
42 FIF_PLCPFAIL | \
43 FIF_CONTROL | \ 43 FIF_CONTROL | \
44 FIF_OTHER_BSS | \ 44 FIF_OTHER_BSS | \
45 FIF_BCN_PRBRESP_PROMISC) 45 FIF_BCN_PRBRESP_PROMISC | \
46 FIF_PSPOLL)
46 47
47#define CHAN2GHZ(channel, freqency, chflags) { \ 48#define CHAN2GHZ(channel, freqency, chflags) { \
48 .band = IEEE80211_BAND_2GHZ, \ 49 .band = IEEE80211_BAND_2GHZ, \
@@ -86,16 +87,14 @@ MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
86MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); 87MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
87MODULE_LICENSE("Dual BSD/GPL"); 88MODULE_LICENSE("Dual BSD/GPL");
88 89
89/* recognized PCI IDs */
90static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = {
91 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, /* 43225 2G */
92 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, /* 43224 DUAL */
93 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, /* 4313 DUAL */
94 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, /* 43224 Ven */
95 {0}
96};
97 90
98MODULE_DEVICE_TABLE(pci, brcms_pci_id_table); 91/* recognized BCMA Core IDs */
92static struct bcma_device_id brcms_coreid_table[] = {
93 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS),
94 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS),
95 BCMA_CORETABLE_END
96};
97MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
99 98
100#ifdef BCMDBG 99#ifdef BCMDBG
101static int msglevel = 0xdeadbeef; 100static int msglevel = 0xdeadbeef;
@@ -372,7 +371,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
372 conf->listen_interval); 371 conf->listen_interval);
373 } 372 }
374 if (changed & IEEE80211_CONF_CHANGE_MONITOR) 373 if (changed & IEEE80211_CONF_CHANGE_MONITOR)
375 wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n", 374 wiphy_dbg(wiphy, "%s: change monitor mode: %s\n",
376 __func__, conf->flags & IEEE80211_CONF_MONITOR ? 375 __func__, conf->flags & IEEE80211_CONF_MONITOR ?
377 "true" : "false"); 376 "true" : "false");
378 if (changed & IEEE80211_CONF_CHANGE_PS) 377 if (changed & IEEE80211_CONF_CHANGE_PS)
@@ -549,29 +548,25 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
549 548
550 changed_flags &= MAC_FILTERS; 549 changed_flags &= MAC_FILTERS;
551 *total_flags &= MAC_FILTERS; 550 *total_flags &= MAC_FILTERS;
551
552 if (changed_flags & FIF_PROMISC_IN_BSS) 552 if (changed_flags & FIF_PROMISC_IN_BSS)
553 wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n"); 553 wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n");
554 if (changed_flags & FIF_ALLMULTI) 554 if (changed_flags & FIF_ALLMULTI)
555 wiphy_err(wiphy, "FIF_ALLMULTI\n"); 555 wiphy_dbg(wiphy, "FIF_ALLMULTI\n");
556 if (changed_flags & FIF_FCSFAIL) 556 if (changed_flags & FIF_FCSFAIL)
557 wiphy_err(wiphy, "FIF_FCSFAIL\n"); 557 wiphy_dbg(wiphy, "FIF_FCSFAIL\n");
558 if (changed_flags & FIF_PLCPFAIL)
559 wiphy_err(wiphy, "FIF_PLCPFAIL\n");
560 if (changed_flags & FIF_CONTROL) 558 if (changed_flags & FIF_CONTROL)
561 wiphy_err(wiphy, "FIF_CONTROL\n"); 559 wiphy_dbg(wiphy, "FIF_CONTROL\n");
562 if (changed_flags & FIF_OTHER_BSS) 560 if (changed_flags & FIF_OTHER_BSS)
563 wiphy_err(wiphy, "FIF_OTHER_BSS\n"); 561 wiphy_dbg(wiphy, "FIF_OTHER_BSS\n");
564 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { 562 if (changed_flags & FIF_PSPOLL)
565 spin_lock_bh(&wl->lock); 563 wiphy_dbg(wiphy, "FIF_PSPOLL\n");
566 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) { 564 if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
567 wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS; 565 wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n");
568 brcms_c_mac_bcn_promisc_change(wl->wlc, 1); 566
569 } else { 567 spin_lock_bh(&wl->lock);
570 brcms_c_mac_bcn_promisc_change(wl->wlc, 0); 568 brcms_c_mac_promisc(wl->wlc, *total_flags);
571 wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS; 569 spin_unlock_bh(&wl->lock);
572 }
573 spin_unlock_bh(&wl->lock);
574 }
575 return; 570 return;
576} 571}
577 572
@@ -727,7 +722,7 @@ static const struct ieee80211_ops brcms_ops = {
727}; 722};
728 723
729/* 724/*
730 * is called in brcms_pci_probe() context, therefore no locking required. 725 * is called in brcms_bcma_probe() context, therefore no locking required.
731 */ 726 */
732static int brcms_set_hint(struct brcms_info *wl, char *abbrev) 727static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
733{ 728{
@@ -867,25 +862,15 @@ static void brcms_free(struct brcms_info *wl)
867#endif 862#endif
868 kfree(t); 863 kfree(t);
869 } 864 }
870
871 /*
872 * unregister_netdev() calls get_stats() which may read chip
873 * registers so we cannot unmap the chip registers until
874 * after calling unregister_netdev() .
875 */
876 if (wl->regsva)
877 iounmap(wl->regsva);
878
879 wl->regsva = NULL;
880} 865}
881 866
882/* 867/*
883* called from both kernel as from this kernel module (error flow on attach) 868* called from both kernel as from this kernel module (error flow on attach)
884* precondition: perimeter lock is not acquired. 869* precondition: perimeter lock is not acquired.
885*/ 870*/
886static void brcms_remove(struct pci_dev *pdev) 871static void brcms_remove(struct bcma_device *pdev)
887{ 872{
888 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 873 struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
889 struct brcms_info *wl = hw->priv; 874 struct brcms_info *wl = hw->priv;
890 875
891 if (wl->wlc) { 876 if (wl->wlc) {
@@ -893,11 +878,10 @@ static void brcms_remove(struct pci_dev *pdev)
893 wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); 878 wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
894 ieee80211_unregister_hw(hw); 879 ieee80211_unregister_hw(hw);
895 } 880 }
896 pci_disable_device(pdev);
897 881
898 brcms_free(wl); 882 brcms_free(wl);
899 883
900 pci_set_drvdata(pdev, NULL); 884 bcma_set_drvdata(pdev, NULL);
901 ieee80211_free_hw(hw); 885 ieee80211_free_hw(hw);
902} 886}
903 887
@@ -1005,11 +989,9 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
1005 * it as static. 989 * it as static.
1006 * 990 *
1007 * 991 *
1008 * is called in brcms_pci_probe() context, therefore no locking required. 992 * is called in brcms_bcma_probe() context, therefore no locking required.
1009 */ 993 */
1010static struct brcms_info *brcms_attach(u16 vendor, u16 device, 994static struct brcms_info *brcms_attach(struct bcma_device *pdev)
1011 resource_size_t regs,
1012 struct pci_dev *btparam, uint irq)
1013{ 995{
1014 struct brcms_info *wl = NULL; 996 struct brcms_info *wl = NULL;
1015 int unit, err; 997 int unit, err;
@@ -1023,7 +1005,7 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
1023 return NULL; 1005 return NULL;
1024 1006
1025 /* allocate private info */ 1007 /* allocate private info */
1026 hw = pci_get_drvdata(btparam); /* btparam == pdev */ 1008 hw = bcma_get_drvdata(pdev);
1027 if (hw != NULL) 1009 if (hw != NULL)
1028 wl = hw->priv; 1010 wl = hw->priv;
1029 if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL)) 1011 if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL))
@@ -1035,26 +1017,20 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
1035 /* setup the bottom half handler */ 1017 /* setup the bottom half handler */
1036 tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl); 1018 tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
1037 1019
1038 wl->regsva = ioremap_nocache(regs, PCI_BAR0_WINSZ);
1039 if (wl->regsva == NULL) {
1040 wiphy_err(wl->wiphy, "wl%d: ioremap() failed\n", unit);
1041 goto fail;
1042 }
1043 spin_lock_init(&wl->lock); 1020 spin_lock_init(&wl->lock);
1044 spin_lock_init(&wl->isr_lock); 1021 spin_lock_init(&wl->isr_lock);
1045 1022
1046 /* prepare ucode */ 1023 /* prepare ucode */
1047 if (brcms_request_fw(wl, btparam) < 0) { 1024 if (brcms_request_fw(wl, pdev->bus->host_pci) < 0) {
1048 wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in " 1025 wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
1049 "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm"); 1026 "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
1050 brcms_release_fw(wl); 1027 brcms_release_fw(wl);
1051 brcms_remove(btparam); 1028 brcms_remove(pdev);
1052 return NULL; 1029 return NULL;
1053 } 1030 }
1054 1031
1055 /* common load-time initialization */ 1032 /* common load-time initialization */
1056 wl->wlc = brcms_c_attach(wl, vendor, device, unit, false, 1033 wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
1057 wl->regsva, btparam, &err);
1058 brcms_release_fw(wl); 1034 brcms_release_fw(wl);
1059 if (!wl->wlc) { 1035 if (!wl->wlc) {
1060 wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n", 1036 wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
@@ -1066,11 +1042,12 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
1066 wl->pub->ieee_hw = hw; 1042 wl->pub->ieee_hw = hw;
1067 1043
1068 /* register our interrupt handler */ 1044 /* register our interrupt handler */
1069 if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) { 1045 if (request_irq(pdev->bus->host_pci->irq, brcms_isr,
1046 IRQF_SHARED, KBUILD_MODNAME, wl)) {
1070 wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit); 1047 wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
1071 goto fail; 1048 goto fail;
1072 } 1049 }
1073 wl->irq = irq; 1050 wl->irq = pdev->bus->host_pci->irq;
1074 1051
1075 /* register module */ 1052 /* register module */
1076 brcms_c_module_register(wl->pub, "linux", wl, NULL); 1053 brcms_c_module_register(wl->pub, "linux", wl, NULL);
@@ -1117,37 +1094,18 @@ fail:
1117 * 1094 *
1118 * Perimeter lock is initialized in the course of this function. 1095 * Perimeter lock is initialized in the course of this function.
1119 */ 1096 */
1120static int __devinit 1097static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
1121brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1122{ 1098{
1123 int rc;
1124 struct brcms_info *wl; 1099 struct brcms_info *wl;
1125 struct ieee80211_hw *hw; 1100 struct ieee80211_hw *hw;
1126 u32 val;
1127
1128 dev_info(&pdev->dev, "bus %d slot %d func %d irq %d\n",
1129 pdev->bus->number, PCI_SLOT(pdev->devfn),
1130 PCI_FUNC(pdev->devfn), pdev->irq);
1131 1101
1132 if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) || 1102 dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
1133 ((pdev->device != 0x0576) && 1103 pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
1134 ((pdev->device & 0xff00) != 0x4300) && 1104 pdev->bus->host_pci->irq);
1135 ((pdev->device & 0xff00) != 0x4700) &&
1136 ((pdev->device < 43000) || (pdev->device > 43999))))
1137 return -ENODEV;
1138 1105
1139 rc = pci_enable_device(pdev); 1106 if ((pdev->id.manuf != BCMA_MANUF_BCM) ||
1140 if (rc) { 1107 (pdev->id.id != BCMA_CORE_80211))
1141 pr_err("%s: Cannot enable device %d-%d_%d\n",
1142 __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
1143 PCI_FUNC(pdev->devfn));
1144 return -ENODEV; 1108 return -ENODEV;
1145 }
1146 pci_set_master(pdev);
1147
1148 pci_read_config_dword(pdev, 0x40, &val);
1149 if ((val & 0x0000ff00) != 0)
1150 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1151 1109
1152 hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops); 1110 hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops);
1153 if (!hw) { 1111 if (!hw) {
@@ -1157,14 +1115,11 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1157 1115
1158 SET_IEEE80211_DEV(hw, &pdev->dev); 1116 SET_IEEE80211_DEV(hw, &pdev->dev);
1159 1117
1160 pci_set_drvdata(pdev, hw); 1118 bcma_set_drvdata(pdev, hw);
1161 1119
1162 memset(hw->priv, 0, sizeof(*wl)); 1120 memset(hw->priv, 0, sizeof(*wl));
1163 1121
1164 wl = brcms_attach(pdev->vendor, pdev->device, 1122 wl = brcms_attach(pdev);
1165 pci_resource_start(pdev, 0), pdev,
1166 pdev->irq);
1167
1168 if (!wl) { 1123 if (!wl) {
1169 pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME, 1124 pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME,
1170 __func__); 1125 __func__);
@@ -1173,16 +1128,23 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1173 return 0; 1128 return 0;
1174} 1129}
1175 1130
1176static int brcms_suspend(struct pci_dev *pdev, pm_message_t state) 1131static int brcms_pci_suspend(struct pci_dev *pdev)
1132{
1133 pci_save_state(pdev);
1134 pci_disable_device(pdev);
1135 return pci_set_power_state(pdev, PCI_D3hot);
1136}
1137
1138static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
1177{ 1139{
1178 struct brcms_info *wl; 1140 struct brcms_info *wl;
1179 struct ieee80211_hw *hw; 1141 struct ieee80211_hw *hw;
1180 1142
1181 hw = pci_get_drvdata(pdev); 1143 hw = bcma_get_drvdata(pdev);
1182 wl = hw->priv; 1144 wl = hw->priv;
1183 if (!wl) { 1145 if (!wl) {
1184 wiphy_err(wl->wiphy, 1146 wiphy_err(wl->wiphy,
1185 "brcms_suspend: pci_get_drvdata failed\n"); 1147 "brcms_suspend: bcma_get_drvdata failed\n");
1186 return -ENODEV; 1148 return -ENODEV;
1187 } 1149 }
1188 1150
@@ -1191,25 +1153,14 @@ static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
1191 wl->pub->hw_up = false; 1153 wl->pub->hw_up = false;
1192 spin_unlock_bh(&wl->lock); 1154 spin_unlock_bh(&wl->lock);
1193 1155
1194 pci_save_state(pdev); 1156 /* temporarily do suspend ourselves */
1195 pci_disable_device(pdev); 1157 return brcms_pci_suspend(pdev->bus->host_pci);
1196 return pci_set_power_state(pdev, PCI_D3hot);
1197} 1158}
1198 1159
1199static int brcms_resume(struct pci_dev *pdev) 1160static int brcms_pci_resume(struct pci_dev *pdev)
1200{ 1161{
1201 struct brcms_info *wl;
1202 struct ieee80211_hw *hw;
1203 int err = 0; 1162 int err = 0;
1204 u32 val; 1163 uint val;
1205
1206 hw = pci_get_drvdata(pdev);
1207 wl = hw->priv;
1208 if (!wl) {
1209 wiphy_err(wl->wiphy,
1210 "wl: brcms_resume: pci_get_drvdata failed\n");
1211 return -ENODEV;
1212 }
1213 1164
1214 err = pci_set_power_state(pdev, PCI_D0); 1165 err = pci_set_power_state(pdev, PCI_D0);
1215 if (err) 1166 if (err)
@@ -1227,24 +1178,28 @@ static int brcms_resume(struct pci_dev *pdev)
1227 if ((val & 0x0000ff00) != 0) 1178 if ((val & 0x0000ff00) != 0)
1228 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 1179 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1229 1180
1181 return 0;
1182}
1183
1184static int brcms_resume(struct bcma_device *pdev)
1185{
1230 /* 1186 /*
1231 * done. driver will be put in up state 1187 * just do pci resume for now until bcma supports it.
1232 * in brcms_ops_add_interface() call.
1233 */ 1188 */
1234 return err; 1189 return brcms_pci_resume(pdev->bus->host_pci);
1235} 1190}
1236 1191
1237static struct pci_driver brcms_pci_driver = { 1192static struct bcma_driver brcms_bcma_driver = {
1238 .name = KBUILD_MODNAME, 1193 .name = KBUILD_MODNAME,
1239 .probe = brcms_pci_probe, 1194 .probe = brcms_bcma_probe,
1240 .suspend = brcms_suspend, 1195 .suspend = brcms_suspend,
1241 .resume = brcms_resume, 1196 .resume = brcms_resume,
1242 .remove = __devexit_p(brcms_remove), 1197 .remove = __devexit_p(brcms_remove),
1243 .id_table = brcms_pci_id_table, 1198 .id_table = brcms_coreid_table,
1244}; 1199};
1245 1200
1246/** 1201/**
1247 * This is the main entry point for the WL driver. 1202 * This is the main entry point for the brcmsmac driver.
1248 * 1203 *
1249 * This function determines if a device pointed to by pdev is a WL device, 1204 * This function determines if a device pointed to by pdev is a WL device,
1250 * and if so, performs a brcms_attach() on it. 1205 * and if so, performs a brcms_attach() on it.
@@ -1259,26 +1214,24 @@ static int __init brcms_module_init(void)
1259 brcm_msg_level = msglevel; 1214 brcm_msg_level = msglevel;
1260#endif /* BCMDBG */ 1215#endif /* BCMDBG */
1261 1216
1262 error = pci_register_driver(&brcms_pci_driver); 1217 error = bcma_driver_register(&brcms_bcma_driver);
1218 printk(KERN_ERR "%s: register returned %d\n", __func__, error);
1263 if (!error) 1219 if (!error)
1264 return 0; 1220 return 0;
1265 1221
1266
1267
1268 return error; 1222 return error;
1269} 1223}
1270 1224
1271/** 1225/**
1272 * This function unloads the WL driver from the system. 1226 * This function unloads the brcmsmac driver from the system.
1273 * 1227 *
1274 * This function unconditionally unloads the WL driver module from the 1228 * This function unconditionally unloads the brcmsmac driver module from the
1275 * system. 1229 * system.
1276 * 1230 *
1277 */ 1231 */
1278static void __exit brcms_module_exit(void) 1232static void __exit brcms_module_exit(void)
1279{ 1233{
1280 pci_unregister_driver(&brcms_pci_driver); 1234 bcma_driver_unregister(&brcms_bcma_driver);
1281
1282} 1235}
1283 1236
1284module_init(brcms_module_init); 1237module_init(brcms_module_init);
@@ -1549,11 +1502,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
1549 if (le32_to_cpu(hdr->idx) == idx) { 1502 if (le32_to_cpu(hdr->idx) == idx) {
1550 pdata = wl->fw.fw_bin[i]->data + 1503 pdata = wl->fw.fw_bin[i]->data +
1551 le32_to_cpu(hdr->offset); 1504 le32_to_cpu(hdr->offset);
1552 *pbuf = kmalloc(len, GFP_ATOMIC); 1505 *pbuf = kmemdup(pdata, len, GFP_ATOMIC);
1553 if (*pbuf == NULL) 1506 if (*pbuf == NULL)
1554 goto fail; 1507 goto fail;
1555 1508
1556 memcpy(*pbuf, pdata, len);
1557 return 0; 1509 return 0;
1558 } 1510 }
1559 } 1511 }
@@ -1566,7 +1518,7 @@ fail:
1566} 1518}
1567 1519
1568/* 1520/*
1569 * Precondition: Since this function is called in brcms_pci_probe() context, 1521 * Precondition: Since this function is called in brcms_bcma_probe() context,
1570 * no locking is required. 1522 * no locking is required.
1571 */ 1523 */
1572int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx) 1524int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
@@ -1606,7 +1558,7 @@ void brcms_ucode_free_buf(void *p)
1606/* 1558/*
1607 * checks validity of all firmware images loaded from user space 1559 * checks validity of all firmware images loaded from user space
1608 * 1560 *
1609 * Precondition: Since this function is called in brcms_pci_probe() context, 1561 * Precondition: Since this function is called in brcms_bcma_probe() context,
1610 * no locking is required. 1562 * no locking is required.
1611 */ 1563 */
1612int brcms_check_firmwares(struct brcms_info *wl) 1564int brcms_check_firmwares(struct brcms_info *wl)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 6242f188b717..8f60419c37bf 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,8 +68,6 @@ struct brcms_info {
68 spinlock_t lock; /* per-device perimeter lock */ 68 spinlock_t lock; /* per-device perimeter lock */
69 spinlock_t isr_lock; /* per-device ISR synchronization lock */ 69 spinlock_t isr_lock; /* per-device ISR synchronization lock */
70 70
71 /* regsva for unmap in brcms_free() */
72 void __iomem *regsva; /* opaque chip registers virtual address */
73 71
74 /* timer related fields */ 72 /* timer related fields */
75 atomic_t callbacks; /* # outstanding callback functions */ 73 atomic_t callbacks; /* # outstanding callback functions */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 36e3e0638300..f7ed34034f88 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -388,10 +388,13 @@ static u16 get_sifs(struct brcms_band *band)
388 */ 388 */
389static bool brcms_deviceremoved(struct brcms_c_info *wlc) 389static bool brcms_deviceremoved(struct brcms_c_info *wlc)
390{ 390{
391 u32 macctrl;
392
391 if (!wlc->hw->clk) 393 if (!wlc->hw->clk)
392 return ai_deviceremoved(wlc->hw->sih); 394 return ai_deviceremoved(wlc->hw->sih);
393 return (R_REG(&wlc->hw->regs->maccontrol) & 395 macctrl = bcma_read32(wlc->hw->d11core,
394 (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN; 396 D11REGOFFS(maccontrol));
397 return (macctrl & (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
395} 398}
396 399
397/* sum the individual fifo tx pending packet counts */ 400/* sum the individual fifo tx pending packet counts */
@@ -582,17 +585,15 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
582static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw, 585static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
583 bool shortslot) 586 bool shortslot)
584{ 587{
585 struct d11regs __iomem *regs; 588 struct bcma_device *core = wlc_hw->d11core;
586
587 regs = wlc_hw->regs;
588 589
589 if (shortslot) { 590 if (shortslot) {
590 /* 11g short slot: 11a timing */ 591 /* 11g short slot: 11a timing */
591 W_REG(&regs->ifs_slot, 0x0207); /* APHY_SLOT_TIME */ 592 bcma_write16(core, D11REGOFFS(ifs_slot), 0x0207);
592 brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME); 593 brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
593 } else { 594 } else {
594 /* 11g long slot: 11b timing */ 595 /* 11g long slot: 11b timing */
595 W_REG(&regs->ifs_slot, 0x0212); /* BPHY_SLOT_TIME */ 596 bcma_write16(core, D11REGOFFS(ifs_slot), 0x0212);
596 brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME); 597 brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
597 } 598 }
598} 599}
@@ -672,24 +673,22 @@ static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
672static void brcms_c_write_inits(struct brcms_hardware *wlc_hw, 673static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
673 const struct d11init *inits) 674 const struct d11init *inits)
674{ 675{
676 struct bcma_device *core = wlc_hw->d11core;
675 int i; 677 int i;
676 u8 __iomem *base; 678 uint offset;
677 u8 __iomem *addr;
678 u16 size; 679 u16 size;
679 u32 value; 680 u32 value;
680 681
681 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 682 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
682 683
683 base = (u8 __iomem *)wlc_hw->regs;
684
685 for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) { 684 for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
686 size = le16_to_cpu(inits[i].size); 685 size = le16_to_cpu(inits[i].size);
687 addr = base + le16_to_cpu(inits[i].addr); 686 offset = le16_to_cpu(inits[i].addr);
688 value = le32_to_cpu(inits[i].value); 687 value = le32_to_cpu(inits[i].value);
689 if (size == 2) 688 if (size == 2)
690 W_REG((u16 __iomem *)addr, value); 689 bcma_write16(core, offset, value);
691 else if (size == 4) 690 else if (size == 4)
692 W_REG((u32 __iomem *)addr, value); 691 bcma_write32(core, offset, value);
693 else 692 else
694 break; 693 break;
695 } 694 }
@@ -739,6 +738,14 @@ static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
739 } 738 }
740} 739}
741 740
741static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v)
742{
743 struct bcma_device *core = wlc_hw->d11core;
744 u32 ioctl = bcma_aread32(core, BCMA_IOCTL) & ~m;
745
746 bcma_awrite32(core, BCMA_IOCTL, ioctl | v);
747}
748
742static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk) 749static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
743{ 750{
744 BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk); 751 BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
@@ -747,17 +754,17 @@ static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
747 754
748 if (OFF == clk) { /* clear gmode bit, put phy into reset */ 755 if (OFF == clk) { /* clear gmode bit, put phy into reset */
749 756
750 ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC | SICF_GMODE), 757 brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC | SICF_GMODE),
751 (SICF_PRST | SICF_FGC)); 758 (SICF_PRST | SICF_FGC));
752 udelay(1); 759 udelay(1);
753 ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_PRST); 760 brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_PRST);
754 udelay(1); 761 udelay(1);
755 762
756 } else { /* take phy out of reset */ 763 } else { /* take phy out of reset */
757 764
758 ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_FGC); 765 brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_FGC);
759 udelay(1); 766 udelay(1);
760 ai_core_cflags(wlc_hw->sih, (SICF_FGC), 0); 767 brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
761 udelay(1); 768 udelay(1);
762 769
763 } 770 }
@@ -778,9 +785,14 @@ static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit)
778 wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit]; 785 wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit];
779 786
780 /* set gmode core flag */ 787 /* set gmode core flag */
781 if (wlc_hw->sbclk && !wlc_hw->noreset) 788 if (wlc_hw->sbclk && !wlc_hw->noreset) {
782 ai_core_cflags(wlc_hw->sih, SICF_GMODE, 789 u32 gmode = 0;
783 ((bandunit == 0) ? SICF_GMODE : 0)); 790
791 if (bandunit == 0)
792 gmode = SICF_GMODE;
793
794 brcms_b_core_ioctl(wlc_hw, SICF_GMODE, gmode);
795 }
784} 796}
785 797
786/* switch to new band but leave it inactive */ 798/* switch to new band but leave it inactive */
@@ -788,10 +800,12 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
788{ 800{
789 struct brcms_hardware *wlc_hw = wlc->hw; 801 struct brcms_hardware *wlc_hw = wlc->hw;
790 u32 macintmask; 802 u32 macintmask;
803 u32 macctrl;
791 804
792 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); 805 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
793 806 macctrl = bcma_read32(wlc_hw->d11core,
794 WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0); 807 D11REGOFFS(maccontrol));
808 WARN_ON((macctrl & MCTL_EN_MAC) != 0);
795 809
796 /* disable interrupts */ 810 /* disable interrupts */
797 macintmask = brcms_intrsoff(wlc->wl); 811 macintmask = brcms_intrsoff(wlc->wl);
@@ -955,8 +969,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
955 brcms_c_txfifo_complete(wlc, queue, 1); 969 brcms_c_txfifo_complete(wlc, queue, 1);
956 970
957 if (lastframe) { 971 if (lastframe) {
958 p->next = NULL;
959 p->prev = NULL;
960 /* remove PLCP & Broadcom tx descriptor header */ 972 /* remove PLCP & Broadcom tx descriptor header */
961 skb_pull(p, D11_PHY_HDR_LEN); 973 skb_pull(p, D11_PHY_HDR_LEN);
962 skb_pull(p, D11_TXH_LEN); 974 skb_pull(p, D11_TXH_LEN);
@@ -984,7 +996,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
984{ 996{
985 bool morepending = false; 997 bool morepending = false;
986 struct brcms_c_info *wlc = wlc_hw->wlc; 998 struct brcms_c_info *wlc = wlc_hw->wlc;
987 struct d11regs __iomem *regs; 999 struct bcma_device *core;
988 struct tx_status txstatus, *txs; 1000 struct tx_status txstatus, *txs;
989 u32 s1, s2; 1001 u32 s1, s2;
990 uint n = 0; 1002 uint n = 0;
@@ -997,18 +1009,18 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
997 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); 1009 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
998 1010
999 txs = &txstatus; 1011 txs = &txstatus;
1000 regs = wlc_hw->regs; 1012 core = wlc_hw->d11core;
1001 *fatal = false; 1013 *fatal = false;
1014 s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
1002 while (!(*fatal) 1015 while (!(*fatal)
1003 && (s1 = R_REG(&regs->frmtxstatus)) & TXS_V) { 1016 && (s1 & TXS_V)) {
1004 1017
1005 if (s1 == 0xffffffff) { 1018 if (s1 == 0xffffffff) {
1006 wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", 1019 wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n",
1007 wlc_hw->unit, __func__); 1020 wlc_hw->unit, __func__);
1008 return morepending; 1021 return morepending;
1009 } 1022 }
1010 1023 s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
1011 s2 = R_REG(&regs->frmtxstatus2);
1012 1024
1013 txs->status = s1 & TXS_STATUS_MASK; 1025 txs->status = s1 & TXS_STATUS_MASK;
1014 txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT; 1026 txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
@@ -1021,6 +1033,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
1021 /* !give others some time to run! */ 1033 /* !give others some time to run! */
1022 if (++n >= max_tx_num) 1034 if (++n >= max_tx_num)
1023 break; 1035 break;
1036 s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
1024 } 1037 }
1025 1038
1026 if (*fatal) 1039 if (*fatal)
@@ -1065,12 +1078,12 @@ brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init)
1065 } 1078 }
1066} 1079}
1067 1080
1068static struct dma64regs __iomem * 1081static uint
1069dmareg(struct brcms_hardware *hw, uint direction, uint fifonum) 1082dmareg(uint direction, uint fifonum)
1070{ 1083{
1071 if (direction == DMA_TX) 1084 if (direction == DMA_TX)
1072 return &(hw->regs->fifo64regs[fifonum].dmaxmt); 1085 return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt);
1073 return &(hw->regs->fifo64regs[fifonum].dmarcv); 1086 return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv);
1074} 1087}
1075 1088
1076static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme) 1089static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
@@ -1096,9 +1109,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
1096 * TX: TX_AC_BK_FIFO (TX AC Background data packets) 1109 * TX: TX_AC_BK_FIFO (TX AC Background data packets)
1097 * RX: RX_FIFO (RX data packets) 1110 * RX: RX_FIFO (RX data packets)
1098 */ 1111 */
1099 wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, 1112 wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
1100 (wme ? dmareg(wlc_hw, DMA_TX, 0) : 1113 (wme ? dmareg(DMA_TX, 0) : 0),
1101 NULL), dmareg(wlc_hw, DMA_RX, 0), 1114 dmareg(DMA_RX, 0),
1102 (wme ? NTXD : 0), NRXD, 1115 (wme ? NTXD : 0), NRXD,
1103 RXBUFSZ, -1, NRXBUFPOST, 1116 RXBUFSZ, -1, NRXBUFPOST,
1104 BRCMS_HWRXOFF, &brcm_msg_level); 1117 BRCMS_HWRXOFF, &brcm_msg_level);
@@ -1110,8 +1123,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
1110 * (legacy) TX_DATA_FIFO (TX data packets) 1123 * (legacy) TX_DATA_FIFO (TX data packets)
1111 * RX: UNUSED 1124 * RX: UNUSED
1112 */ 1125 */
1113 wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, 1126 wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
1114 dmareg(wlc_hw, DMA_TX, 1), NULL, 1127 dmareg(DMA_TX, 1), 0,
1115 NTXD, 0, 0, -1, 0, 0, 1128 NTXD, 0, 0, -1, 0, 0,
1116 &brcm_msg_level); 1129 &brcm_msg_level);
1117 dma_attach_err |= (NULL == wlc_hw->di[1]); 1130 dma_attach_err |= (NULL == wlc_hw->di[1]);
@@ -1121,8 +1134,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
1121 * TX: TX_AC_VI_FIFO (TX AC Video data packets) 1134 * TX: TX_AC_VI_FIFO (TX AC Video data packets)
1122 * RX: UNUSED 1135 * RX: UNUSED
1123 */ 1136 */
1124 wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, 1137 wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
1125 dmareg(wlc_hw, DMA_TX, 2), NULL, 1138 dmareg(DMA_TX, 2), 0,
1126 NTXD, 0, 0, -1, 0, 0, 1139 NTXD, 0, 0, -1, 0, 0,
1127 &brcm_msg_level); 1140 &brcm_msg_level);
1128 dma_attach_err |= (NULL == wlc_hw->di[2]); 1141 dma_attach_err |= (NULL == wlc_hw->di[2]);
@@ -1131,9 +1144,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
1131 * TX: TX_AC_VO_FIFO (TX AC Voice data packets) 1144 * TX: TX_AC_VO_FIFO (TX AC Voice data packets)
1132 * (legacy) TX_CTL_FIFO (TX control & mgmt packets) 1145 * (legacy) TX_CTL_FIFO (TX control & mgmt packets)
1133 */ 1146 */
1134 wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, 1147 wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
1135 dmareg(wlc_hw, DMA_TX, 3), 1148 dmareg(DMA_TX, 3),
1136 NULL, NTXD, 0, 0, -1, 1149 0, NTXD, 0, 0, -1,
1137 0, 0, &brcm_msg_level); 1150 0, 0, &brcm_msg_level);
1138 dma_attach_err |= (NULL == wlc_hw->di[3]); 1151 dma_attach_err |= (NULL == wlc_hw->di[3]);
1139/* Cleaner to leave this as if with AP defined */ 1152/* Cleaner to leave this as if with AP defined */
@@ -1207,7 +1220,7 @@ static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
1207/* control chip clock to save power, enable dynamic clock or force fast clock */ 1220/* control chip clock to save power, enable dynamic clock or force fast clock */
1208static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode) 1221static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
1209{ 1222{
1210 if (wlc_hw->sih->cccaps & CC_CAP_PMU) { 1223 if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) {
1211 /* new chips with PMU, CCS_FORCEHT will distribute the HT clock 1224 /* new chips with PMU, CCS_FORCEHT will distribute the HT clock
1212 * on backplane, but mac core will still run on ALP(not HT) when 1225 * on backplane, but mac core will still run on ALP(not HT) when
1213 * it enters powersave mode, which means the FCA bit may not be 1226 * it enters powersave mode, which means the FCA bit may not be
@@ -1216,29 +1229,33 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
1216 1229
1217 if (wlc_hw->clk) { 1230 if (wlc_hw->clk) {
1218 if (mode == CLK_FAST) { 1231 if (mode == CLK_FAST) {
1219 OR_REG(&wlc_hw->regs->clk_ctl_st, 1232 bcma_set32(wlc_hw->d11core,
1220 CCS_FORCEHT); 1233 D11REGOFFS(clk_ctl_st),
1234 CCS_FORCEHT);
1221 1235
1222 udelay(64); 1236 udelay(64);
1223 1237
1224 SPINWAIT(((R_REG 1238 SPINWAIT(
1225 (&wlc_hw->regs-> 1239 ((bcma_read32(wlc_hw->d11core,
1226 clk_ctl_st) & CCS_HTAVAIL) == 0), 1240 D11REGOFFS(clk_ctl_st)) &
1227 PMU_MAX_TRANSITION_DLY); 1241 CCS_HTAVAIL) == 0),
1228 WARN_ON(!(R_REG 1242 PMU_MAX_TRANSITION_DLY);
1229 (&wlc_hw->regs-> 1243 WARN_ON(!(bcma_read32(wlc_hw->d11core,
1230 clk_ctl_st) & CCS_HTAVAIL)); 1244 D11REGOFFS(clk_ctl_st)) &
1245 CCS_HTAVAIL));
1231 } else { 1246 } else {
1232 if ((wlc_hw->sih->pmurev == 0) && 1247 if ((ai_get_pmurev(wlc_hw->sih) == 0) &&
1233 (R_REG 1248 (bcma_read32(wlc_hw->d11core,
1234 (&wlc_hw->regs-> 1249 D11REGOFFS(clk_ctl_st)) &
1235 clk_ctl_st) & (CCS_FORCEHT | CCS_HTAREQ))) 1250 (CCS_FORCEHT | CCS_HTAREQ)))
1236 SPINWAIT(((R_REG 1251 SPINWAIT(
1237 (&wlc_hw->regs-> 1252 ((bcma_read32(wlc_hw->d11core,
1238 clk_ctl_st) & CCS_HTAVAIL) 1253 offsetof(struct d11regs,
1239 == 0), 1254 clk_ctl_st)) &
1240 PMU_MAX_TRANSITION_DLY); 1255 CCS_HTAVAIL) == 0),
1241 AND_REG(&wlc_hw->regs->clk_ctl_st, 1256 PMU_MAX_TRANSITION_DLY);
1257 bcma_mask32(wlc_hw->d11core,
1258 D11REGOFFS(clk_ctl_st),
1242 ~CCS_FORCEHT); 1259 ~CCS_FORCEHT);
1243 } 1260 }
1244 } 1261 }
@@ -1253,7 +1270,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
1253 1270
1254 /* check fast clock is available (if core is not in reset) */ 1271 /* check fast clock is available (if core is not in reset) */
1255 if (wlc_hw->forcefastclk && wlc_hw->clk) 1272 if (wlc_hw->forcefastclk && wlc_hw->clk)
1256 WARN_ON(!(ai_core_sflags(wlc_hw->sih, 0, 0) & 1273 WARN_ON(!(bcma_aread32(wlc_hw->d11core, BCMA_IOST) &
1257 SISF_FCLKA)); 1274 SISF_FCLKA));
1258 1275
1259 /* 1276 /*
@@ -1370,7 +1387,8 @@ static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw)
1370 maccontrol |= MCTL_INFRA; 1387 maccontrol |= MCTL_INFRA;
1371 } 1388 }
1372 1389
1373 W_REG(&wlc_hw->regs->maccontrol, maccontrol); 1390 bcma_write32(wlc_hw->d11core, D11REGOFFS(maccontrol),
1391 maccontrol);
1374} 1392}
1375 1393
1376/* set or clear maccontrol bits */ 1394/* set or clear maccontrol bits */
@@ -1464,7 +1482,7 @@ static void
1464brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset, 1482brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
1465 const u8 *addr) 1483 const u8 *addr)
1466{ 1484{
1467 struct d11regs __iomem *regs; 1485 struct bcma_device *core = wlc_hw->d11core;
1468 u16 mac_l; 1486 u16 mac_l;
1469 u16 mac_m; 1487 u16 mac_m;
1470 u16 mac_h; 1488 u16 mac_h;
@@ -1472,38 +1490,36 @@ brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
1472 BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n", 1490 BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
1473 wlc_hw->unit); 1491 wlc_hw->unit);
1474 1492
1475 regs = wlc_hw->regs;
1476 mac_l = addr[0] | (addr[1] << 8); 1493 mac_l = addr[0] | (addr[1] << 8);
1477 mac_m = addr[2] | (addr[3] << 8); 1494 mac_m = addr[2] | (addr[3] << 8);
1478 mac_h = addr[4] | (addr[5] << 8); 1495 mac_h = addr[4] | (addr[5] << 8);
1479 1496
1480 /* enter the MAC addr into the RXE match registers */ 1497 /* enter the MAC addr into the RXE match registers */
1481 W_REG(&regs->rcm_ctl, RCM_INC_DATA | match_reg_offset); 1498 bcma_write16(core, D11REGOFFS(rcm_ctl),
1482 W_REG(&regs->rcm_mat_data, mac_l); 1499 RCM_INC_DATA | match_reg_offset);
1483 W_REG(&regs->rcm_mat_data, mac_m); 1500 bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_l);
1484 W_REG(&regs->rcm_mat_data, mac_h); 1501 bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_m);
1485 1502 bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_h);
1486} 1503}
1487 1504
1488void 1505void
1489brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len, 1506brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
1490 void *buf) 1507 void *buf)
1491{ 1508{
1492 struct d11regs __iomem *regs; 1509 struct bcma_device *core = wlc_hw->d11core;
1493 u32 word; 1510 u32 word;
1494 __le32 word_le; 1511 __le32 word_le;
1495 __be32 word_be; 1512 __be32 word_be;
1496 bool be_bit; 1513 bool be_bit;
1497 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 1514 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
1498 1515
1499 regs = wlc_hw->regs; 1516 bcma_write32(core, D11REGOFFS(tplatewrptr), offset);
1500 W_REG(&regs->tplatewrptr, offset);
1501 1517
1502 /* if MCTL_BIGEND bit set in mac control register, 1518 /* if MCTL_BIGEND bit set in mac control register,
1503 * the chip swaps data in fifo, as well as data in 1519 * the chip swaps data in fifo, as well as data in
1504 * template ram 1520 * template ram
1505 */ 1521 */
1506 be_bit = (R_REG(&regs->maccontrol) & MCTL_BIGEND) != 0; 1522 be_bit = (bcma_read32(core, D11REGOFFS(maccontrol)) & MCTL_BIGEND) != 0;
1507 1523
1508 while (len > 0) { 1524 while (len > 0) {
1509 memcpy(&word, buf, sizeof(u32)); 1525 memcpy(&word, buf, sizeof(u32));
@@ -1516,7 +1532,7 @@ brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
1516 word = *(u32 *)&word_le; 1532 word = *(u32 *)&word_le;
1517 } 1533 }
1518 1534
1519 W_REG(&regs->tplatewrdata, word); 1535 bcma_write32(core, D11REGOFFS(tplatewrdata), word);
1520 1536
1521 buf = (u8 *) buf + sizeof(u32); 1537 buf = (u8 *) buf + sizeof(u32);
1522 len -= sizeof(u32); 1538 len -= sizeof(u32);
@@ -1527,18 +1543,20 @@ static void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin)
1527{ 1543{
1528 wlc_hw->band->CWmin = newmin; 1544 wlc_hw->band->CWmin = newmin;
1529 1545
1530 W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMIN); 1546 bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
1531 (void)R_REG(&wlc_hw->regs->objaddr); 1547 OBJADDR_SCR_SEL | S_DOT11_CWMIN);
1532 W_REG(&wlc_hw->regs->objdata, newmin); 1548 (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
1549 bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmin);
1533} 1550}
1534 1551
1535static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax) 1552static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
1536{ 1553{
1537 wlc_hw->band->CWmax = newmax; 1554 wlc_hw->band->CWmax = newmax;
1538 1555
1539 W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMAX); 1556 bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
1540 (void)R_REG(&wlc_hw->regs->objaddr); 1557 OBJADDR_SCR_SEL | S_DOT11_CWMAX);
1541 W_REG(&wlc_hw->regs->objdata, newmax); 1558 (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
1559 bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmax);
1542} 1560}
1543 1561
1544void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw) 1562void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
@@ -1704,17 +1722,17 @@ void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
1704{ 1722{
1705 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 1723 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
1706 1724
1707 ai_corereg(wlc_hw->sih, SI_CC_IDX, 1725 ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
1708 offsetof(struct chipcregs, chipcontrol_addr), ~0, 0); 1726 ~0, 0);
1709 udelay(1); 1727 udelay(1);
1710 ai_corereg(wlc_hw->sih, SI_CC_IDX, 1728 ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
1711 offsetof(struct chipcregs, chipcontrol_data), 0x4, 0); 1729 0x4, 0);
1712 udelay(1); 1730 udelay(1);
1713 ai_corereg(wlc_hw->sih, SI_CC_IDX, 1731 ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
1714 offsetof(struct chipcregs, chipcontrol_data), 0x4, 4); 1732 0x4, 4);
1715 udelay(1); 1733 udelay(1);
1716 ai_corereg(wlc_hw->sih, SI_CC_IDX, 1734 ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
1717 offsetof(struct chipcregs, chipcontrol_data), 0x4, 0); 1735 0x4, 0);
1718 udelay(1); 1736 udelay(1);
1719} 1737}
1720 1738
@@ -1728,18 +1746,18 @@ void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk)
1728 return; 1746 return;
1729 1747
1730 if (ON == clk) 1748 if (ON == clk)
1731 ai_core_cflags(wlc_hw->sih, SICF_FGC, SICF_FGC); 1749 brcms_b_core_ioctl(wlc_hw, SICF_FGC, SICF_FGC);
1732 else 1750 else
1733 ai_core_cflags(wlc_hw->sih, SICF_FGC, 0); 1751 brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
1734 1752
1735} 1753}
1736 1754
1737void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk) 1755void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
1738{ 1756{
1739 if (ON == clk) 1757 if (ON == clk)
1740 ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE); 1758 brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, SICF_MPCLKE);
1741 else 1759 else
1742 ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0); 1760 brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, 0);
1743} 1761}
1744 1762
1745void brcms_b_phy_reset(struct brcms_hardware *wlc_hw) 1763void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
@@ -1759,7 +1777,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
1759 if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) && 1777 if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
1760 NREV_LE(wlc_hw->band->phyrev, 4)) { 1778 NREV_LE(wlc_hw->band->phyrev, 4)) {
1761 /* Set the PHY bandwidth */ 1779 /* Set the PHY bandwidth */
1762 ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits); 1780 brcms_b_core_ioctl(wlc_hw, SICF_BWMASK, phy_bw_clkbits);
1763 1781
1764 udelay(1); 1782 udelay(1);
1765 1783
@@ -1767,13 +1785,13 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
1767 brcms_b_core_phypll_reset(wlc_hw); 1785 brcms_b_core_phypll_reset(wlc_hw);
1768 1786
1769 /* reset the PHY */ 1787 /* reset the PHY */
1770 ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE), 1788 brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE),
1771 (SICF_PRST | SICF_PCLKE)); 1789 (SICF_PRST | SICF_PCLKE));
1772 phy_in_reset = true; 1790 phy_in_reset = true;
1773 } else { 1791 } else {
1774 ai_core_cflags(wlc_hw->sih, 1792 brcms_b_core_ioctl(wlc_hw,
1775 (SICF_PRST | SICF_PCLKE | SICF_BWMASK), 1793 (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
1776 (SICF_PRST | SICF_PCLKE | phy_bw_clkbits)); 1794 (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
1777 } 1795 }
1778 1796
1779 udelay(2); 1797 udelay(2);
@@ -1790,8 +1808,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
1790 u32 macintmask; 1808 u32 macintmask;
1791 1809
1792 /* Enable the d11 core before accessing it */ 1810 /* Enable the d11 core before accessing it */
1793 if (!ai_iscoreup(wlc_hw->sih)) { 1811 if (!bcma_core_is_enabled(wlc_hw->d11core)) {
1794 ai_core_reset(wlc_hw->sih, 0, 0); 1812 bcma_core_enable(wlc_hw->d11core, 0);
1795 brcms_c_mctrl_reset(wlc_hw); 1813 brcms_c_mctrl_reset(wlc_hw);
1796 } 1814 }
1797 1815
@@ -1817,7 +1835,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
1817 brcms_intrsrestore(wlc->wl, macintmask); 1835 brcms_intrsrestore(wlc->wl, macintmask);
1818 1836
1819 /* ucode should still be suspended.. */ 1837 /* ucode should still be suspended.. */
1820 WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0); 1838 WARN_ON((bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)) &
1839 MCTL_EN_MAC) != 0);
1821} 1840}
1822 1841
1823static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw) 1842static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
@@ -1845,7 +1864,7 @@ static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
1845 uint b2 = boardrev & 0xf; 1864 uint b2 = boardrev & 0xf;
1846 1865
1847 /* voards from other vendors are always considered valid */ 1866 /* voards from other vendors are always considered valid */
1848 if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM) 1867 if (ai_get_boardvendor(wlc_hw->sih) != PCI_VENDOR_ID_BROADCOM)
1849 return true; 1868 return true;
1850 1869
1851 /* do some boardrev sanity checks when boardvendor is Broadcom */ 1870 /* do some boardrev sanity checks when boardvendor is Broadcom */
@@ -1917,7 +1936,7 @@ static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
1917static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw) 1936static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
1918{ 1937{
1919 bool v, clk, xtal; 1938 bool v, clk, xtal;
1920 u32 resetbits = 0, flags = 0; 1939 u32 flags = 0;
1921 1940
1922 xtal = wlc_hw->sbclk; 1941 xtal = wlc_hw->sbclk;
1923 if (!xtal) 1942 if (!xtal)
@@ -1934,22 +1953,22 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
1934 flags |= SICF_PCLKE; 1953 flags |= SICF_PCLKE;
1935 1954
1936 /* 1955 /*
1956 * TODO: test suspend/resume
1957 *
1937 * AI chip doesn't restore bar0win2 on 1958 * AI chip doesn't restore bar0win2 on
1938 * hibernation/resume, need sw fixup 1959 * hibernation/resume, need sw fixup
1939 */ 1960 */
1940 if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) || 1961
1941 (wlc_hw->sih->chip == BCM43225_CHIP_ID)) 1962 bcma_core_enable(wlc_hw->d11core, flags);
1942 wlc_hw->regs = (struct d11regs __iomem *)
1943 ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
1944 ai_core_reset(wlc_hw->sih, flags, resetbits);
1945 brcms_c_mctrl_reset(wlc_hw); 1963 brcms_c_mctrl_reset(wlc_hw);
1946 } 1964 }
1947 1965
1948 v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0); 1966 v = ((bcma_read32(wlc_hw->d11core,
1967 D11REGOFFS(phydebug)) & PDBG_RFD) != 0);
1949 1968
1950 /* put core back into reset */ 1969 /* put core back into reset */
1951 if (!clk) 1970 if (!clk)
1952 ai_core_disable(wlc_hw->sih, 0); 1971 bcma_core_disable(wlc_hw->d11core, 0);
1953 1972
1954 if (!xtal) 1973 if (!xtal)
1955 brcms_b_xtal(wlc_hw, OFF); 1974 brcms_b_xtal(wlc_hw, OFF);
@@ -1973,25 +1992,21 @@ static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo)
1973 */ 1992 */
1974void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags) 1993void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
1975{ 1994{
1976 struct d11regs __iomem *regs;
1977 uint i; 1995 uint i;
1978 bool fastclk; 1996 bool fastclk;
1979 u32 resetbits = 0;
1980 1997
1981 if (flags == BRCMS_USE_COREFLAGS) 1998 if (flags == BRCMS_USE_COREFLAGS)
1982 flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0); 1999 flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
1983 2000
1984 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 2001 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
1985 2002
1986 regs = wlc_hw->regs;
1987
1988 /* request FAST clock if not on */ 2003 /* request FAST clock if not on */
1989 fastclk = wlc_hw->forcefastclk; 2004 fastclk = wlc_hw->forcefastclk;
1990 if (!fastclk) 2005 if (!fastclk)
1991 brcms_b_clkctl_clk(wlc_hw, CLK_FAST); 2006 brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
1992 2007
1993 /* reset the dma engines except first time thru */ 2008 /* reset the dma engines except first time thru */
1994 if (ai_iscoreup(wlc_hw->sih)) { 2009 if (bcma_core_is_enabled(wlc_hw->d11core)) {
1995 for (i = 0; i < NFIFO; i++) 2010 for (i = 0; i < NFIFO; i++)
1996 if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i]))) 2011 if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
1997 wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: " 2012 wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
@@ -2029,14 +2044,14 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
2029 * they may touch chipcommon as well. 2044 * they may touch chipcommon as well.
2030 */ 2045 */
2031 wlc_hw->clk = false; 2046 wlc_hw->clk = false;
2032 ai_core_reset(wlc_hw->sih, flags, resetbits); 2047 bcma_core_enable(wlc_hw->d11core, flags);
2033 wlc_hw->clk = true; 2048 wlc_hw->clk = true;
2034 if (wlc_hw->band && wlc_hw->band->pi) 2049 if (wlc_hw->band && wlc_hw->band->pi)
2035 wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true); 2050 wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);
2036 2051
2037 brcms_c_mctrl_reset(wlc_hw); 2052 brcms_c_mctrl_reset(wlc_hw);
2038 2053
2039 if (wlc_hw->sih->cccaps & CC_CAP_PMU) 2054 if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU)
2040 brcms_b_clkctl_clk(wlc_hw, CLK_FAST); 2055 brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
2041 2056
2042 brcms_b_phy_reset(wlc_hw); 2057 brcms_b_phy_reset(wlc_hw);
@@ -2057,7 +2072,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
2057 */ 2072 */
2058static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw) 2073static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
2059{ 2074{
2060 struct d11regs __iomem *regs = wlc_hw->regs; 2075 struct bcma_device *core = wlc_hw->d11core;
2061 u16 fifo_nu; 2076 u16 fifo_nu;
2062 u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk; 2077 u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
2063 u16 txfifo_def, txfifo_def1; 2078 u16 txfifo_def, txfifo_def1;
@@ -2078,11 +2093,11 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
2078 txfifo_cmd = 2093 txfifo_cmd =
2079 TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT); 2094 TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT);
2080 2095
2081 W_REG(&regs->xmtfifocmd, txfifo_cmd); 2096 bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
2082 W_REG(&regs->xmtfifodef, txfifo_def); 2097 bcma_write16(core, D11REGOFFS(xmtfifodef), txfifo_def);
2083 W_REG(&regs->xmtfifodef1, txfifo_def1); 2098 bcma_write16(core, D11REGOFFS(xmtfifodef1), txfifo_def1);
2084 2099
2085 W_REG(&regs->xmtfifocmd, txfifo_cmd); 2100 bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
2086 2101
2087 txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu]; 2102 txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu];
2088 } 2103 }
@@ -2117,27 +2132,27 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
2117 2132
2118void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode) 2133void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
2119{ 2134{
2120 struct d11regs __iomem *regs = wlc_hw->regs; 2135 struct bcma_device *core = wlc_hw->d11core;
2121 2136
2122 if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) || 2137 if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
2123 (wlc_hw->sih->chip == BCM43225_CHIP_ID)) { 2138 (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) {
2124 if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */ 2139 if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
2125 W_REG(&regs->tsf_clk_frac_l, 0x2082); 2140 bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
2126 W_REG(&regs->tsf_clk_frac_h, 0x8); 2141 bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
2127 } else if (spurmode == WL_SPURAVOID_ON1) { /* 123Mhz */ 2142 } else if (spurmode == WL_SPURAVOID_ON1) { /* 123Mhz */
2128 W_REG(&regs->tsf_clk_frac_l, 0x5341); 2143 bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x5341);
2129 W_REG(&regs->tsf_clk_frac_h, 0x8); 2144 bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
2130 } else { /* 120Mhz */ 2145 } else { /* 120Mhz */
2131 W_REG(&regs->tsf_clk_frac_l, 0x8889); 2146 bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x8889);
2132 W_REG(&regs->tsf_clk_frac_h, 0x8); 2147 bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
2133 } 2148 }
2134 } else if (BRCMS_ISLCNPHY(wlc_hw->band)) { 2149 } else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
2135 if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */ 2150 if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */
2136 W_REG(&regs->tsf_clk_frac_l, 0x7CE0); 2151 bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x7CE0);
2137 W_REG(&regs->tsf_clk_frac_h, 0xC); 2152 bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
2138 } else { /* 80Mhz */ 2153 } else { /* 80Mhz */
2139 W_REG(&regs->tsf_clk_frac_l, 0xCCCD); 2154 bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0xCCCD);
2140 W_REG(&regs->tsf_clk_frac_h, 0xC); 2155 bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
2141 } 2156 }
2142 } 2157 }
2143} 2158}
@@ -2146,11 +2161,8 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
2146static void brcms_c_gpio_init(struct brcms_c_info *wlc) 2161static void brcms_c_gpio_init(struct brcms_c_info *wlc)
2147{ 2162{
2148 struct brcms_hardware *wlc_hw = wlc->hw; 2163 struct brcms_hardware *wlc_hw = wlc->hw;
2149 struct d11regs __iomem *regs;
2150 u32 gc, gm; 2164 u32 gc, gm;
2151 2165
2152 regs = wlc_hw->regs;
2153
2154 /* use GPIO select 0 to get all gpio signals from the gpio out reg */ 2166 /* use GPIO select 0 to get all gpio signals from the gpio out reg */
2155 brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0); 2167 brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
2156 2168
@@ -2181,10 +2193,10 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
2181 * The board itself is powered by these GPIOs 2193 * The board itself is powered by these GPIOs
2182 * (when not sending pattern) so set them high 2194 * (when not sending pattern) so set them high
2183 */ 2195 */
2184 OR_REG(&regs->psm_gpio_oe, 2196 bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_oe),
2185 (BOARD_GPIO_12 | BOARD_GPIO_13)); 2197 (BOARD_GPIO_12 | BOARD_GPIO_13));
2186 OR_REG(&regs->psm_gpio_out, 2198 bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_out),
2187 (BOARD_GPIO_12 | BOARD_GPIO_13)); 2199 (BOARD_GPIO_12 | BOARD_GPIO_13));
2188 2200
2189 /* Enable antenna diversity, use 2x4 mode */ 2201 /* Enable antenna diversity, use 2x4 mode */
2190 brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN, 2202 brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
@@ -2211,7 +2223,7 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
2211static void brcms_ucode_write(struct brcms_hardware *wlc_hw, 2223static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
2212 const __le32 ucode[], const size_t nbytes) 2224 const __le32 ucode[], const size_t nbytes)
2213{ 2225{
2214 struct d11regs __iomem *regs = wlc_hw->regs; 2226 struct bcma_device *core = wlc_hw->d11core;
2215 uint i; 2227 uint i;
2216 uint count; 2228 uint count;
2217 2229
@@ -2219,10 +2231,11 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
2219 2231
2220 count = (nbytes / sizeof(u32)); 2232 count = (nbytes / sizeof(u32));
2221 2233
2222 W_REG(&regs->objaddr, (OBJADDR_AUTO_INC | OBJADDR_UCM_SEL)); 2234 bcma_write32(core, D11REGOFFS(objaddr),
2223 (void)R_REG(&regs->objaddr); 2235 OBJADDR_AUTO_INC | OBJADDR_UCM_SEL);
2236 (void)bcma_read32(core, D11REGOFFS(objaddr));
2224 for (i = 0; i < count; i++) 2237 for (i = 0; i < count; i++)
2225 W_REG(&regs->objdata, le32_to_cpu(ucode[i])); 2238 bcma_write32(core, D11REGOFFS(objdata), le32_to_cpu(ucode[i]));
2226 2239
2227} 2240}
2228 2241
@@ -2288,7 +2301,7 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
2288 bool fatal = false; 2301 bool fatal = false;
2289 uint unit; 2302 uint unit;
2290 uint intstatus, idx; 2303 uint intstatus, idx;
2291 struct d11regs __iomem *regs = wlc_hw->regs; 2304 struct bcma_device *core = wlc_hw->d11core;
2292 struct wiphy *wiphy = wlc_hw->wlc->wiphy; 2305 struct wiphy *wiphy = wlc_hw->wlc->wiphy;
2293 2306
2294 unit = wlc_hw->unit; 2307 unit = wlc_hw->unit;
@@ -2296,7 +2309,9 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
2296 for (idx = 0; idx < NFIFO; idx++) { 2309 for (idx = 0; idx < NFIFO; idx++) {
2297 /* read intstatus register and ignore any non-error bits */ 2310 /* read intstatus register and ignore any non-error bits */
2298 intstatus = 2311 intstatus =
2299 R_REG(&regs->intctrlregs[idx].intstatus) & I_ERRORS; 2312 bcma_read32(core,
2313 D11REGOFFS(intctrlregs[idx].intstatus)) &
2314 I_ERRORS;
2300 if (!intstatus) 2315 if (!intstatus)
2301 continue; 2316 continue;
2302 2317
@@ -2341,8 +2356,9 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
2341 brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */ 2356 brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */
2342 break; 2357 break;
2343 } else 2358 } else
2344 W_REG(&regs->intctrlregs[idx].intstatus, 2359 bcma_write32(core,
2345 intstatus); 2360 D11REGOFFS(intctrlregs[idx].intstatus),
2361 intstatus);
2346 } 2362 }
2347} 2363}
2348 2364
@@ -2350,28 +2366,7 @@ void brcms_c_intrson(struct brcms_c_info *wlc)
2350{ 2366{
2351 struct brcms_hardware *wlc_hw = wlc->hw; 2367 struct brcms_hardware *wlc_hw = wlc->hw;
2352 wlc->macintmask = wlc->defmacintmask; 2368 wlc->macintmask = wlc->defmacintmask;
2353 W_REG(&wlc_hw->regs->macintmask, wlc->macintmask); 2369 bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
2354}
2355
2356/*
2357 * callback for siutils.c, which has only wlc handler, no wl they both check
2358 * up, not only because there is no need to off/restore d11 interrupt but also
2359 * because per-port code may require sync with valid interrupt.
2360 */
2361static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc)
2362{
2363 if (!wlc->hw->up)
2364 return 0;
2365
2366 return brcms_intrsoff(wlc->wl);
2367}
2368
2369static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask)
2370{
2371 if (!wlc->hw->up)
2372 return;
2373
2374 brcms_intrsrestore(wlc->wl, macintmask);
2375} 2370}
2376 2371
2377u32 brcms_c_intrsoff(struct brcms_c_info *wlc) 2372u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
@@ -2384,8 +2379,8 @@ u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
2384 2379
2385 macintmask = wlc->macintmask; /* isr can still happen */ 2380 macintmask = wlc->macintmask; /* isr can still happen */
2386 2381
2387 W_REG(&wlc_hw->regs->macintmask, 0); 2382 bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), 0);
2388 (void)R_REG(&wlc_hw->regs->macintmask); /* sync readback */ 2383 (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(macintmask));
2389 udelay(1); /* ensure int line is no longer driven */ 2384 udelay(1); /* ensure int line is no longer driven */
2390 wlc->macintmask = 0; 2385 wlc->macintmask = 0;
2391 2386
@@ -2400,7 +2395,7 @@ void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask)
2400 return; 2395 return;
2401 2396
2402 wlc->macintmask = macintmask; 2397 wlc->macintmask = macintmask;
2403 W_REG(&wlc_hw->regs->macintmask, wlc->macintmask); 2398 bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
2404} 2399}
2405 2400
2406/* assumes that the d11 MAC is enabled */ 2401/* assumes that the d11 MAC is enabled */
@@ -2512,11 +2507,11 @@ brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx)
2512static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr) 2507static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
2513{ 2508{
2514 struct brcms_hardware *wlc_hw = wlc->hw; 2509 struct brcms_hardware *wlc_hw = wlc->hw;
2515 struct d11regs __iomem *regs = wlc_hw->regs; 2510 struct bcma_device *core = wlc_hw->d11core;
2516 u32 macintstatus; 2511 u32 macintstatus;
2517 2512
2518 /* macintstatus includes a DMA interrupt summary bit */ 2513 /* macintstatus includes a DMA interrupt summary bit */
2519 macintstatus = R_REG(&regs->macintstatus); 2514 macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));
2520 2515
2521 BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit, 2516 BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
2522 macintstatus); 2517 macintstatus);
@@ -2543,12 +2538,12 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
2543 * consequences 2538 * consequences
2544 */ 2539 */
2545 /* turn off the interrupts */ 2540 /* turn off the interrupts */
2546 W_REG(&regs->macintmask, 0); 2541 bcma_write32(core, D11REGOFFS(macintmask), 0);
2547 (void)R_REG(&regs->macintmask); /* sync readback */ 2542 (void)bcma_read32(core, D11REGOFFS(macintmask));
2548 wlc->macintmask = 0; 2543 wlc->macintmask = 0;
2549 2544
2550 /* clear device interrupts */ 2545 /* clear device interrupts */
2551 W_REG(&regs->macintstatus, macintstatus); 2546 bcma_write32(core, D11REGOFFS(macintstatus), macintstatus);
2552 2547
2553 /* MI_DMAINT is indication of non-zero intstatus */ 2548 /* MI_DMAINT is indication of non-zero intstatus */
2554 if (macintstatus & MI_DMAINT) 2549 if (macintstatus & MI_DMAINT)
@@ -2557,8 +2552,8 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
2557 * RX_FIFO. If MI_DMAINT is set, assume it 2552 * RX_FIFO. If MI_DMAINT is set, assume it
2558 * is set and clear the interrupt. 2553 * is set and clear the interrupt.
2559 */ 2554 */
2560 W_REG(&regs->intctrlregs[RX_FIFO].intstatus, 2555 bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intstatus),
2561 DEF_RXINTMASK); 2556 DEF_RXINTMASK);
2562 2557
2563 return macintstatus; 2558 return macintstatus;
2564} 2559}
@@ -2621,7 +2616,7 @@ bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc)
2621void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc) 2616void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
2622{ 2617{
2623 struct brcms_hardware *wlc_hw = wlc->hw; 2618 struct brcms_hardware *wlc_hw = wlc->hw;
2624 struct d11regs __iomem *regs = wlc_hw->regs; 2619 struct bcma_device *core = wlc_hw->d11core;
2625 u32 mc, mi; 2620 u32 mc, mi;
2626 struct wiphy *wiphy = wlc->wiphy; 2621 struct wiphy *wiphy = wlc->wiphy;
2627 2622
@@ -2638,7 +2633,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
2638 /* force the core awake */ 2633 /* force the core awake */
2639 brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND); 2634 brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
2640 2635
2641 mc = R_REG(&regs->maccontrol); 2636 mc = bcma_read32(core, D11REGOFFS(maccontrol));
2642 2637
2643 if (mc == 0xffffffff) { 2638 if (mc == 0xffffffff) {
2644 wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, 2639 wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
@@ -2650,7 +2645,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
2650 WARN_ON(!(mc & MCTL_PSM_RUN)); 2645 WARN_ON(!(mc & MCTL_PSM_RUN));
2651 WARN_ON(!(mc & MCTL_EN_MAC)); 2646 WARN_ON(!(mc & MCTL_EN_MAC));
2652 2647
2653 mi = R_REG(&regs->macintstatus); 2648 mi = bcma_read32(core, D11REGOFFS(macintstatus));
2654 if (mi == 0xffffffff) { 2649 if (mi == 0xffffffff) {
2655 wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, 2650 wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
2656 __func__); 2651 __func__);
@@ -2661,21 +2656,21 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
2661 2656
2662 brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0); 2657 brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);
2663 2658
2664 SPINWAIT(!(R_REG(&regs->macintstatus) & MI_MACSSPNDD), 2659 SPINWAIT(!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD),
2665 BRCMS_MAX_MAC_SUSPEND); 2660 BRCMS_MAX_MAC_SUSPEND);
2666 2661
2667 if (!(R_REG(&regs->macintstatus) & MI_MACSSPNDD)) { 2662 if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
2668 wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS" 2663 wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
2669 " and MI_MACSSPNDD is still not on.\n", 2664 " and MI_MACSSPNDD is still not on.\n",
2670 wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND); 2665 wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
2671 wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, " 2666 wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
2672 "psm_brc 0x%04x\n", wlc_hw->unit, 2667 "psm_brc 0x%04x\n", wlc_hw->unit,
2673 R_REG(&regs->psmdebug), 2668 bcma_read32(core, D11REGOFFS(psmdebug)),
2674 R_REG(&regs->phydebug), 2669 bcma_read32(core, D11REGOFFS(phydebug)),
2675 R_REG(&regs->psm_brc)); 2670 bcma_read16(core, D11REGOFFS(psm_brc)));
2676 } 2671 }
2677 2672
2678 mc = R_REG(&regs->maccontrol); 2673 mc = bcma_read32(core, D11REGOFFS(maccontrol));
2679 if (mc == 0xffffffff) { 2674 if (mc == 0xffffffff) {
2680 wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, 2675 wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
2681 __func__); 2676 __func__);
@@ -2690,7 +2685,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
2690void brcms_c_enable_mac(struct brcms_c_info *wlc) 2685void brcms_c_enable_mac(struct brcms_c_info *wlc)
2691{ 2686{
2692 struct brcms_hardware *wlc_hw = wlc->hw; 2687 struct brcms_hardware *wlc_hw = wlc->hw;
2693 struct d11regs __iomem *regs = wlc_hw->regs; 2688 struct bcma_device *core = wlc_hw->d11core;
2694 u32 mc, mi; 2689 u32 mc, mi;
2695 2690
2696 BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit, 2691 BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
@@ -2703,20 +2698,20 @@ void brcms_c_enable_mac(struct brcms_c_info *wlc)
2703 if (wlc_hw->mac_suspend_depth > 0) 2698 if (wlc_hw->mac_suspend_depth > 0)
2704 return; 2699 return;
2705 2700
2706 mc = R_REG(&regs->maccontrol); 2701 mc = bcma_read32(core, D11REGOFFS(maccontrol));
2707 WARN_ON(mc & MCTL_PSM_JMP_0); 2702 WARN_ON(mc & MCTL_PSM_JMP_0);
2708 WARN_ON(mc & MCTL_EN_MAC); 2703 WARN_ON(mc & MCTL_EN_MAC);
2709 WARN_ON(!(mc & MCTL_PSM_RUN)); 2704 WARN_ON(!(mc & MCTL_PSM_RUN));
2710 2705
2711 brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC); 2706 brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
2712 W_REG(&regs->macintstatus, MI_MACSSPNDD); 2707 bcma_write32(core, D11REGOFFS(macintstatus), MI_MACSSPNDD);
2713 2708
2714 mc = R_REG(&regs->maccontrol); 2709 mc = bcma_read32(core, D11REGOFFS(maccontrol));
2715 WARN_ON(mc & MCTL_PSM_JMP_0); 2710 WARN_ON(mc & MCTL_PSM_JMP_0);
2716 WARN_ON(!(mc & MCTL_EN_MAC)); 2711 WARN_ON(!(mc & MCTL_EN_MAC));
2717 WARN_ON(!(mc & MCTL_PSM_RUN)); 2712 WARN_ON(!(mc & MCTL_PSM_RUN));
2718 2713
2719 mi = R_REG(&regs->macintstatus); 2714 mi = bcma_read32(core, D11REGOFFS(macintstatus));
2720 WARN_ON(mi & MI_MACSSPNDD); 2715 WARN_ON(mi & MI_MACSSPNDD);
2721 2716
2722 brcms_c_ucode_wake_override_clear(wlc_hw, 2717 brcms_c_ucode_wake_override_clear(wlc_hw,
@@ -2733,55 +2728,53 @@ void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode)
2733 2728
2734static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw) 2729static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
2735{ 2730{
2736 struct d11regs __iomem *regs; 2731 struct bcma_device *core = wlc_hw->d11core;
2737 u32 w, val; 2732 u32 w, val;
2738 struct wiphy *wiphy = wlc_hw->wlc->wiphy; 2733 struct wiphy *wiphy = wlc_hw->wlc->wiphy;
2739 2734
2740 BCMMSG(wiphy, "wl%d\n", wlc_hw->unit); 2735 BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
2741 2736
2742 regs = wlc_hw->regs;
2743
2744 /* Validate dchip register access */ 2737 /* Validate dchip register access */
2745 2738
2746 W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); 2739 bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
2747 (void)R_REG(&regs->objaddr); 2740 (void)bcma_read32(core, D11REGOFFS(objaddr));
2748 w = R_REG(&regs->objdata); 2741 w = bcma_read32(core, D11REGOFFS(objdata));
2749 2742
2750 /* Can we write and read back a 32bit register? */ 2743 /* Can we write and read back a 32bit register? */
2751 W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); 2744 bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
2752 (void)R_REG(&regs->objaddr); 2745 (void)bcma_read32(core, D11REGOFFS(objaddr));
2753 W_REG(&regs->objdata, (u32) 0xaa5555aa); 2746 bcma_write32(core, D11REGOFFS(objdata), (u32) 0xaa5555aa);
2754 2747
2755 W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); 2748 bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
2756 (void)R_REG(&regs->objaddr); 2749 (void)bcma_read32(core, D11REGOFFS(objaddr));
2757 val = R_REG(&regs->objdata); 2750 val = bcma_read32(core, D11REGOFFS(objdata));
2758 if (val != (u32) 0xaa5555aa) { 2751 if (val != (u32) 0xaa5555aa) {
2759 wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, " 2752 wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
2760 "expected 0xaa5555aa\n", wlc_hw->unit, val); 2753 "expected 0xaa5555aa\n", wlc_hw->unit, val);
2761 return false; 2754 return false;
2762 } 2755 }
2763 2756
2764 W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); 2757 bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
2765 (void)R_REG(&regs->objaddr); 2758 (void)bcma_read32(core, D11REGOFFS(objaddr));
2766 W_REG(&regs->objdata, (u32) 0x55aaaa55); 2759 bcma_write32(core, D11REGOFFS(objdata), (u32) 0x55aaaa55);
2767 2760
2768 W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); 2761 bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
2769 (void)R_REG(&regs->objaddr); 2762 (void)bcma_read32(core, D11REGOFFS(objaddr));
2770 val = R_REG(&regs->objdata); 2763 val = bcma_read32(core, D11REGOFFS(objdata));
2771 if (val != (u32) 0x55aaaa55) { 2764 if (val != (u32) 0x55aaaa55) {
2772 wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, " 2765 wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
2773 "expected 0x55aaaa55\n", wlc_hw->unit, val); 2766 "expected 0x55aaaa55\n", wlc_hw->unit, val);
2774 return false; 2767 return false;
2775 } 2768 }
2776 2769
2777 W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0); 2770 bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
2778 (void)R_REG(&regs->objaddr); 2771 (void)bcma_read32(core, D11REGOFFS(objaddr));
2779 W_REG(&regs->objdata, w); 2772 bcma_write32(core, D11REGOFFS(objdata), w);
2780 2773
2781 /* clear CFPStart */ 2774 /* clear CFPStart */
2782 W_REG(&regs->tsf_cfpstart, 0); 2775 bcma_write32(core, D11REGOFFS(tsf_cfpstart), 0);
2783 2776
2784 w = R_REG(&regs->maccontrol); 2777 w = bcma_read32(core, D11REGOFFS(maccontrol));
2785 if ((w != (MCTL_IHR_EN | MCTL_WAKE)) && 2778 if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
2786 (w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) { 2779 (w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
2787 wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = " 2780 wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = "
@@ -2798,38 +2791,38 @@ static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
2798 2791
2799void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on) 2792void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
2800{ 2793{
2801 struct d11regs __iomem *regs; 2794 struct bcma_device *core = wlc_hw->d11core;
2802 u32 tmp; 2795 u32 tmp;
2803 2796
2804 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 2797 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
2805 2798
2806 tmp = 0; 2799 tmp = 0;
2807 regs = wlc_hw->regs;
2808 2800
2809 if (on) { 2801 if (on) {
2810 if ((wlc_hw->sih->chip == BCM4313_CHIP_ID)) { 2802 if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
2811 OR_REG(&regs->clk_ctl_st, 2803 bcma_set32(core, D11REGOFFS(clk_ctl_st),
2812 (CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL | 2804 CCS_ERSRC_REQ_HT |
2813 CCS_ERSRC_REQ_PHYPLL)); 2805 CCS_ERSRC_REQ_D11PLL |
2814 SPINWAIT((R_REG(&regs->clk_ctl_st) & 2806 CCS_ERSRC_REQ_PHYPLL);
2815 (CCS_ERSRC_AVAIL_HT)) != (CCS_ERSRC_AVAIL_HT), 2807 SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
2808 CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT,
2816 PHYPLL_WAIT_US); 2809 PHYPLL_WAIT_US);
2817 2810
2818 tmp = R_REG(&regs->clk_ctl_st); 2811 tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
2819 if ((tmp & (CCS_ERSRC_AVAIL_HT)) != 2812 if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT)
2820 (CCS_ERSRC_AVAIL_HT))
2821 wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY" 2813 wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY"
2822 " PLL failed\n", __func__); 2814 " PLL failed\n", __func__);
2823 } else { 2815 } else {
2824 OR_REG(&regs->clk_ctl_st, 2816 bcma_set32(core, D11REGOFFS(clk_ctl_st),
2825 (CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL)); 2817 tmp | CCS_ERSRC_REQ_D11PLL |
2826 SPINWAIT((R_REG(&regs->clk_ctl_st) & 2818 CCS_ERSRC_REQ_PHYPLL);
2819 SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
2827 (CCS_ERSRC_AVAIL_D11PLL | 2820 (CCS_ERSRC_AVAIL_D11PLL |
2828 CCS_ERSRC_AVAIL_PHYPLL)) != 2821 CCS_ERSRC_AVAIL_PHYPLL)) !=
2829 (CCS_ERSRC_AVAIL_D11PLL | 2822 (CCS_ERSRC_AVAIL_D11PLL |
2830 CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US); 2823 CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US);
2831 2824
2832 tmp = R_REG(&regs->clk_ctl_st); 2825 tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
2833 if ((tmp & 2826 if ((tmp &
2834 (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) 2827 (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
2835 != 2828 !=
@@ -2843,8 +2836,9 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
2843 * be requesting it; so we'll deassert the request but 2836 * be requesting it; so we'll deassert the request but
2844 * not wait for status to comply. 2837 * not wait for status to comply.
2845 */ 2838 */
2846 AND_REG(&regs->clk_ctl_st, ~CCS_ERSRC_REQ_PHYPLL); 2839 bcma_mask32(core, D11REGOFFS(clk_ctl_st),
2847 tmp = R_REG(&regs->clk_ctl_st); 2840 ~CCS_ERSRC_REQ_PHYPLL);
2841 (void)bcma_read32(core, D11REGOFFS(clk_ctl_st));
2848 } 2842 }
2849} 2843}
2850 2844
@@ -2872,7 +2866,7 @@ static void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
2872 brcms_b_core_phypll_ctl(wlc_hw, false); 2866 brcms_b_core_phypll_ctl(wlc_hw, false);
2873 2867
2874 wlc_hw->clk = false; 2868 wlc_hw->clk = false;
2875 ai_core_disable(wlc_hw->sih, 0); 2869 bcma_core_disable(wlc_hw->d11core, 0);
2876 wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false); 2870 wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
2877} 2871}
2878 2872
@@ -2896,35 +2890,31 @@ static void brcms_c_flushqueues(struct brcms_c_info *wlc)
2896static u16 2890static u16
2897brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel) 2891brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
2898{ 2892{
2899 struct d11regs __iomem *regs = wlc_hw->regs; 2893 struct bcma_device *core = wlc_hw->d11core;
2900 u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata; 2894 u16 objoff = D11REGOFFS(objdata);
2901 u16 __iomem *objdata_hi = objdata_lo + 1;
2902 u16 v;
2903 2895
2904 W_REG(&regs->objaddr, sel | (offset >> 2)); 2896 bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
2905 (void)R_REG(&regs->objaddr); 2897 (void)bcma_read32(core, D11REGOFFS(objaddr));
2906 if (offset & 2) 2898 if (offset & 2)
2907 v = R_REG(objdata_hi); 2899 objoff += 2;
2908 else
2909 v = R_REG(objdata_lo);
2910 2900
2911 return v; 2901 return bcma_read16(core, objoff);
2902;
2912} 2903}
2913 2904
2914static void 2905static void
2915brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v, 2906brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
2916 u32 sel) 2907 u32 sel)
2917{ 2908{
2918 struct d11regs __iomem *regs = wlc_hw->regs; 2909 struct bcma_device *core = wlc_hw->d11core;
2919 u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata; 2910 u16 objoff = D11REGOFFS(objdata);
2920 u16 __iomem *objdata_hi = objdata_lo + 1;
2921 2911
2922 W_REG(&regs->objaddr, sel | (offset >> 2)); 2912 bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
2923 (void)R_REG(&regs->objaddr); 2913 (void)bcma_read32(core, D11REGOFFS(objaddr));
2924 if (offset & 2) 2914 if (offset & 2)
2925 W_REG(objdata_hi, v); 2915 objoff += 2;
2926 else 2916
2927 W_REG(objdata_lo, v); 2917 bcma_write16(core, objoff, v);
2928} 2918}
2929 2919
2930/* 2920/*
@@ -3010,14 +3000,14 @@ static void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw,
3010 3000
3011 /* write retry limit to SCR, shouldn't need to suspend */ 3001 /* write retry limit to SCR, shouldn't need to suspend */
3012 if (wlc_hw->up) { 3002 if (wlc_hw->up) {
3013 W_REG(&wlc_hw->regs->objaddr, 3003 bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
3014 OBJADDR_SCR_SEL | S_DOT11_SRC_LMT); 3004 OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
3015 (void)R_REG(&wlc_hw->regs->objaddr); 3005 (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
3016 W_REG(&wlc_hw->regs->objdata, wlc_hw->SRL); 3006 bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->SRL);
3017 W_REG(&wlc_hw->regs->objaddr, 3007 bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
3018 OBJADDR_SCR_SEL | S_DOT11_LRC_LMT); 3008 OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
3019 (void)R_REG(&wlc_hw->regs->objaddr); 3009 (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
3020 W_REG(&wlc_hw->regs->objdata, wlc_hw->LRL); 3010 bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->LRL);
3021 } 3011 }
3022} 3012}
3023 3013
@@ -3064,7 +3054,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
3064 return false; 3054 return false;
3065 3055
3066 /* disallow PS when one of these meets when not scanning */ 3056 /* disallow PS when one of these meets when not scanning */
3067 if (wlc->monitor) 3057 if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
3068 return false; 3058 return false;
3069 3059
3070 if (cfg->associated) { 3060 if (cfg->associated) {
@@ -3199,9 +3189,9 @@ void brcms_c_init_scb(struct scb *scb)
3199static void brcms_b_coreinit(struct brcms_c_info *wlc) 3189static void brcms_b_coreinit(struct brcms_c_info *wlc)
3200{ 3190{
3201 struct brcms_hardware *wlc_hw = wlc->hw; 3191 struct brcms_hardware *wlc_hw = wlc->hw;
3202 struct d11regs __iomem *regs; 3192 struct bcma_device *core = wlc_hw->d11core;
3203 u32 sflags; 3193 u32 sflags;
3204 uint bcnint_us; 3194 u32 bcnint_us;
3205 uint i = 0; 3195 uint i = 0;
3206 bool fifosz_fixup = false; 3196 bool fifosz_fixup = false;
3207 int err = 0; 3197 int err = 0;
@@ -3209,8 +3199,6 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
3209 struct wiphy *wiphy = wlc->wiphy; 3199 struct wiphy *wiphy = wlc->wiphy;
3210 struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode; 3200 struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
3211 3201
3212 regs = wlc_hw->regs;
3213
3214 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); 3202 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
3215 3203
3216 /* reset PSM */ 3204 /* reset PSM */
@@ -3223,20 +3211,20 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
3223 fifosz_fixup = true; 3211 fifosz_fixup = true;
3224 3212
3225 /* let the PSM run to the suspended state, set mode to BSS STA */ 3213 /* let the PSM run to the suspended state, set mode to BSS STA */
3226 W_REG(&regs->macintstatus, -1); 3214 bcma_write32(core, D11REGOFFS(macintstatus), -1);
3227 brcms_b_mctrl(wlc_hw, ~0, 3215 brcms_b_mctrl(wlc_hw, ~0,
3228 (MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE)); 3216 (MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));
3229 3217
3230 /* wait for ucode to self-suspend after auto-init */ 3218 /* wait for ucode to self-suspend after auto-init */
3231 SPINWAIT(((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0), 3219 SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
3232 1000 * 1000); 3220 MI_MACSSPNDD) == 0), 1000 * 1000);
3233 if ((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0) 3221 if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
3234 wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-" 3222 wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
3235 "suspend!\n", wlc_hw->unit); 3223 "suspend!\n", wlc_hw->unit);
3236 3224
3237 brcms_c_gpio_init(wlc); 3225 brcms_c_gpio_init(wlc);
3238 3226
3239 sflags = ai_core_sflags(wlc_hw->sih, 0, 0); 3227 sflags = bcma_aread32(core, BCMA_IOST);
3240 3228
3241 if (D11REV_IS(wlc_hw->corerev, 23)) { 3229 if (D11REV_IS(wlc_hw->corerev, 23)) {
3242 if (BRCMS_ISNPHY(wlc_hw->band)) 3230 if (BRCMS_ISNPHY(wlc_hw->band))
@@ -3300,7 +3288,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
3300 wlc_hw->xmtfifo_sz[i], i); 3288 wlc_hw->xmtfifo_sz[i], i);
3301 3289
3302 /* make sure we can still talk to the mac */ 3290 /* make sure we can still talk to the mac */
3303 WARN_ON(R_REG(&regs->maccontrol) == 0xffffffff); 3291 WARN_ON(bcma_read32(core, D11REGOFFS(maccontrol)) == 0xffffffff);
3304 3292
3305 /* band-specific inits done by wlc_bsinit() */ 3293 /* band-specific inits done by wlc_bsinit() */
3306 3294
@@ -3309,7 +3297,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
3309 brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT); 3297 brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
3310 3298
3311 /* enable one rx interrupt per received frame */ 3299 /* enable one rx interrupt per received frame */
3312 W_REG(&regs->intrcvlazy[0], (1 << IRL_FC_SHIFT)); 3300 bcma_write32(core, D11REGOFFS(intrcvlazy[0]), (1 << IRL_FC_SHIFT));
3313 3301
3314 /* set the station mode (BSS STA) */ 3302 /* set the station mode (BSS STA) */
3315 brcms_b_mctrl(wlc_hw, 3303 brcms_b_mctrl(wlc_hw,
@@ -3318,19 +3306,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
3318 3306
3319 /* set up Beacon interval */ 3307 /* set up Beacon interval */
3320 bcnint_us = 0x8000 << 10; 3308 bcnint_us = 0x8000 << 10;
3321 W_REG(&regs->tsf_cfprep, (bcnint_us << CFPREP_CBI_SHIFT)); 3309 bcma_write32(core, D11REGOFFS(tsf_cfprep),
3322 W_REG(&regs->tsf_cfpstart, bcnint_us); 3310 (bcnint_us << CFPREP_CBI_SHIFT));
3323 W_REG(&regs->macintstatus, MI_GP1); 3311 bcma_write32(core, D11REGOFFS(tsf_cfpstart), bcnint_us);
3312 bcma_write32(core, D11REGOFFS(macintstatus), MI_GP1);
3324 3313
3325 /* write interrupt mask */ 3314 /* write interrupt mask */
3326 W_REG(&regs->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK); 3315 bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intmask),
3316 DEF_RXINTMASK);
3327 3317
3328 /* allow the MAC to control the PHY clock (dynamic on/off) */ 3318 /* allow the MAC to control the PHY clock (dynamic on/off) */
3329 brcms_b_macphyclk_set(wlc_hw, ON); 3319 brcms_b_macphyclk_set(wlc_hw, ON);
3330 3320
3331 /* program dynamic clock control fast powerup delay register */ 3321 /* program dynamic clock control fast powerup delay register */
3332 wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih); 3322 wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
3333 W_REG(&regs->scc_fastpwrup_dly, wlc->fastpwrup_dly); 3323 bcma_write16(core, D11REGOFFS(scc_fastpwrup_dly), wlc->fastpwrup_dly);
3334 3324
3335 /* tell the ucode the corerev */ 3325 /* tell the ucode the corerev */
3336 brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev); 3326 brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
@@ -3343,19 +3333,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
3343 machwcap >> 16) & 0xffff)); 3333 machwcap >> 16) & 0xffff));
3344 3334
3345 /* write retry limits to SCR, this done after PSM init */ 3335 /* write retry limits to SCR, this done after PSM init */
3346 W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_SRC_LMT); 3336 bcma_write32(core, D11REGOFFS(objaddr),
3347 (void)R_REG(&regs->objaddr); 3337 OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
3348 W_REG(&regs->objdata, wlc_hw->SRL); 3338 (void)bcma_read32(core, D11REGOFFS(objaddr));
3349 W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_LRC_LMT); 3339 bcma_write32(core, D11REGOFFS(objdata), wlc_hw->SRL);
3350 (void)R_REG(&regs->objaddr); 3340 bcma_write32(core, D11REGOFFS(objaddr),
3351 W_REG(&regs->objdata, wlc_hw->LRL); 3341 OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
3342 (void)bcma_read32(core, D11REGOFFS(objaddr));
3343 bcma_write32(core, D11REGOFFS(objdata), wlc_hw->LRL);
3352 3344
3353 /* write rate fallback retry limits */ 3345 /* write rate fallback retry limits */
3354 brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL); 3346 brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
3355 brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL); 3347 brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
3356 3348
3357 AND_REG(&regs->ifs_ctl, 0x0FFF); 3349 bcma_mask16(core, D11REGOFFS(ifs_ctl), 0x0FFF);
3358 W_REG(&regs->ifs_aifsn, EDCF_AIFSN_MIN); 3350 bcma_write16(core, D11REGOFFS(ifs_aifsn), EDCF_AIFSN_MIN);
3359 3351
3360 /* init the tx dma engines */ 3352 /* init the tx dma engines */
3361 for (i = 0; i < NFIFO; i++) { 3353 for (i = 0; i < NFIFO; i++) {
@@ -3584,29 +3576,31 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
3584} 3576}
3585 3577
3586/* 3578/*
3587 * Set or clear maccontrol bits MCTL_PROMISC, MCTL_BCNS_PROMISC and 3579 * Set or clear filtering related maccontrol bits based on
3588 * MCTL_KEEPCONTROL 3580 * specified filter flags
3589 */ 3581 */
3590static void brcms_c_mac_promisc(struct brcms_c_info *wlc) 3582void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
3591{ 3583{
3592 u32 promisc_bits = 0; 3584 u32 promisc_bits = 0;
3593 3585
3594 if (wlc->bcnmisc_monitor) 3586 wlc->filter_flags = filter_flags;
3587
3588 if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
3589 promisc_bits |= MCTL_PROMISC;
3590
3591 if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
3595 promisc_bits |= MCTL_BCNS_PROMISC; 3592 promisc_bits |= MCTL_BCNS_PROMISC;
3596 3593
3597 if (wlc->monitor) 3594 if (filter_flags & FIF_FCSFAIL)
3598 promisc_bits |= 3595 promisc_bits |= MCTL_KEEPBADFCS;
3599 MCTL_PROMISC | MCTL_BCNS_PROMISC | MCTL_KEEPCONTROL;
3600 3596
3601 brcms_b_mctrl(wlc->hw, 3597 if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
3602 MCTL_PROMISC | MCTL_BCNS_PROMISC | MCTL_KEEPCONTROL, 3598 promisc_bits |= MCTL_KEEPCONTROL;
3603 promisc_bits);
3604}
3605 3599
3606void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc) 3600 brcms_b_mctrl(wlc->hw,
3607{ 3601 MCTL_PROMISC | MCTL_BCNS_PROMISC |
3608 wlc->bcnmisc_monitor = promisc; 3602 MCTL_KEEPCONTROL | MCTL_KEEPBADFCS,
3609 brcms_c_mac_promisc(wlc); 3603 promisc_bits);
3610} 3604}
3611 3605
3612/* 3606/*
@@ -3636,9 +3630,6 @@ static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
3636 } else { 3630 } else {
3637 /* disable an active IBSS if we are not on the home channel */ 3631 /* disable an active IBSS if we are not on the home channel */
3638 } 3632 }
3639
3640 /* update the various promisc bits */
3641 brcms_c_mac_promisc(wlc);
3642} 3633}
3643 3634
3644static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate, 3635static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
@@ -3813,7 +3804,7 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
3813 3804
3814 BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps); 3805 BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);
3815 3806
3816 v1 = R_REG(&wlc->regs->maccontrol); 3807 v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
3817 v2 = MCTL_WAKE; 3808 v2 = MCTL_WAKE;
3818 if (hps) 3809 if (hps)
3819 v2 |= MCTL_HPS; 3810 v2 |= MCTL_HPS;
@@ -4132,7 +4123,8 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
4132 acp_shm.cwmax = params->cw_max; 4123 acp_shm.cwmax = params->cw_max;
4133 acp_shm.cwcur = acp_shm.cwmin; 4124 acp_shm.cwcur = acp_shm.cwmin;
4134 acp_shm.bslots = 4125 acp_shm.bslots =
4135 R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur; 4126 bcma_read16(wlc->hw->d11core, D11REGOFFS(tsf_random)) &
4127 acp_shm.cwcur;
4136 acp_shm.reggap = acp_shm.bslots + acp_shm.aifs; 4128 acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
4137 /* Indicate the new params to the ucode */ 4129 /* Indicate the new params to the ucode */
4138 acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO + 4130 acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO +
@@ -4440,21 +4432,21 @@ struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc)
4440 * initialize software state for each core and band 4432 * initialize software state for each core and band
4441 * put the whole chip in reset(driver down state), no clock 4433 * put the whole chip in reset(driver down state), no clock
4442 */ 4434 */
4443static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, 4435static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
4444 uint unit, bool piomode, void __iomem *regsva, 4436 uint unit, bool piomode)
4445 struct pci_dev *btparam)
4446{ 4437{
4447 struct brcms_hardware *wlc_hw; 4438 struct brcms_hardware *wlc_hw;
4448 struct d11regs __iomem *regs;
4449 char *macaddr = NULL; 4439 char *macaddr = NULL;
4450 uint err = 0; 4440 uint err = 0;
4451 uint j; 4441 uint j;
4452 bool wme = false; 4442 bool wme = false;
4453 struct shared_phy_params sha_params; 4443 struct shared_phy_params sha_params;
4454 struct wiphy *wiphy = wlc->wiphy; 4444 struct wiphy *wiphy = wlc->wiphy;
4445 struct pci_dev *pcidev = core->bus->host_pci;
4455 4446
4456 BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor, 4447 BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
4457 device); 4448 pcidev->vendor,
4449 pcidev->device);
4458 4450
4459 wme = true; 4451 wme = true;
4460 4452
@@ -4471,7 +4463,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
4471 * Do the hardware portion of the attach. Also initialize software 4463 * Do the hardware portion of the attach. Also initialize software
4472 * state that depends on the particular hardware we are running. 4464 * state that depends on the particular hardware we are running.
4473 */ 4465 */
4474 wlc_hw->sih = ai_attach(regsva, btparam); 4466 wlc_hw->sih = ai_attach(core->bus);
4475 if (wlc_hw->sih == NULL) { 4467 if (wlc_hw->sih == NULL) {
4476 wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n", 4468 wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
4477 unit); 4469 unit);
@@ -4480,25 +4472,19 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
4480 } 4472 }
4481 4473
4482 /* verify again the device is supported */ 4474 /* verify again the device is supported */
4483 if (!brcms_c_chipmatch(vendor, device)) { 4475 if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
4484 wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported " 4476 wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
4485 "vendor/device (0x%x/0x%x)\n", 4477 "vendor/device (0x%x/0x%x)\n",
4486 unit, vendor, device); 4478 unit, pcidev->vendor, pcidev->device);
4487 err = 12; 4479 err = 12;
4488 goto fail; 4480 goto fail;
4489 } 4481 }
4490 4482
4491 wlc_hw->vendorid = vendor; 4483 wlc_hw->vendorid = pcidev->vendor;
4492 wlc_hw->deviceid = device; 4484 wlc_hw->deviceid = pcidev->device;
4493
4494 /* set bar0 window to point at D11 core */
4495 wlc_hw->regs = (struct d11regs __iomem *)
4496 ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
4497 wlc_hw->corerev = ai_corerev(wlc_hw->sih);
4498
4499 regs = wlc_hw->regs;
4500 4485
4501 wlc->regs = wlc_hw->regs; 4486 wlc_hw->d11core = core;
4487 wlc_hw->corerev = core->id.rev;
4502 4488
4503 /* validate chip, chiprev and corerev */ 4489 /* validate chip, chiprev and corerev */
4504 if (!brcms_c_isgoodchip(wlc_hw)) { 4490 if (!brcms_c_isgoodchip(wlc_hw)) {
@@ -4533,8 +4519,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
4533 wlc_hw->boardrev = (u16) j; 4519 wlc_hw->boardrev = (u16) j;
4534 if (!brcms_c_validboardtype(wlc_hw)) { 4520 if (!brcms_c_validboardtype(wlc_hw)) {
4535 wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom " 4521 wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
4536 "board type (0x%x)" " or revision level (0x%x)\n", 4522 "board type (0x%x)" " or revision level (0x%x)\n",
4537 unit, wlc_hw->sih->boardtype, wlc_hw->boardrev); 4523 unit, ai_get_boardtype(wlc_hw->sih),
4524 wlc_hw->boardrev);
4538 err = 15; 4525 err = 15;
4539 goto fail; 4526 goto fail;
4540 } 4527 }
@@ -4555,7 +4542,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
4555 else 4542 else
4556 wlc_hw->_nbands = 1; 4543 wlc_hw->_nbands = 1;
4557 4544
4558 if ((wlc_hw->sih->chip == BCM43225_CHIP_ID)) 4545 if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
4559 wlc_hw->_nbands = 1; 4546 wlc_hw->_nbands = 1;
4560 4547
4561 /* BMAC_NOTE: remove init of pub values when brcms_c_attach() 4548 /* BMAC_NOTE: remove init of pub values when brcms_c_attach()
@@ -4587,16 +4574,14 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
4587 sha_params.corerev = wlc_hw->corerev; 4574 sha_params.corerev = wlc_hw->corerev;
4588 sha_params.vid = wlc_hw->vendorid; 4575 sha_params.vid = wlc_hw->vendorid;
4589 sha_params.did = wlc_hw->deviceid; 4576 sha_params.did = wlc_hw->deviceid;
4590 sha_params.chip = wlc_hw->sih->chip; 4577 sha_params.chip = ai_get_chip_id(wlc_hw->sih);
4591 sha_params.chiprev = wlc_hw->sih->chiprev; 4578 sha_params.chiprev = ai_get_chiprev(wlc_hw->sih);
4592 sha_params.chippkg = wlc_hw->sih->chippkg; 4579 sha_params.chippkg = ai_get_chippkg(wlc_hw->sih);
4593 sha_params.sromrev = wlc_hw->sromrev; 4580 sha_params.sromrev = wlc_hw->sromrev;
4594 sha_params.boardtype = wlc_hw->sih->boardtype; 4581 sha_params.boardtype = ai_get_boardtype(wlc_hw->sih);
4595 sha_params.boardrev = wlc_hw->boardrev; 4582 sha_params.boardrev = wlc_hw->boardrev;
4596 sha_params.boardvendor = wlc_hw->sih->boardvendor;
4597 sha_params.boardflags = wlc_hw->boardflags; 4583 sha_params.boardflags = wlc_hw->boardflags;
4598 sha_params.boardflags2 = wlc_hw->boardflags2; 4584 sha_params.boardflags2 = wlc_hw->boardflags2;
4599 sha_params.buscorerev = wlc_hw->sih->buscorerev;
4600 4585
4601 /* alloc and save pointer to shared phy state area */ 4586 /* alloc and save pointer to shared phy state area */
4602 wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params); 4587 wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params);
@@ -4618,9 +4603,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
4618 wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G; 4603 wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
4619 wlc->band->bandunit = j; 4604 wlc->band->bandunit = j;
4620 wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G; 4605 wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
4621 wlc->core->coreidx = ai_coreidx(wlc_hw->sih); 4606 wlc->core->coreidx = core->core_index;
4622 4607
4623 wlc_hw->machwcap = R_REG(&regs->machwcap); 4608 wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap));
4624 wlc_hw->machwcap_backup = wlc_hw->machwcap; 4609 wlc_hw->machwcap_backup = wlc_hw->machwcap;
4625 4610
4626 /* init tx fifo size */ 4611 /* init tx fifo size */
@@ -4629,7 +4614,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
4629 4614
4630 /* Get a phy for this band */ 4615 /* Get a phy for this band */
4631 wlc_hw->band->pi = 4616 wlc_hw->band->pi =
4632 wlc_phy_attach(wlc_hw->phy_sh, regs, 4617 wlc_phy_attach(wlc_hw->phy_sh, core,
4633 wlc_hw->band->bandtype, 4618 wlc_hw->band->bandtype,
4634 wlc->wiphy); 4619 wlc->wiphy);
4635 if (wlc_hw->band->pi == NULL) { 4620 if (wlc_hw->band->pi == NULL) {
@@ -4703,10 +4688,6 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
4703 /* Match driver "down" state */ 4688 /* Match driver "down" state */
4704 ai_pci_down(wlc_hw->sih); 4689 ai_pci_down(wlc_hw->sih);
4705 4690
4706 /* register sb interrupt callback functions */
4707 ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff,
4708 (void *)brcms_c_wlintrsrestore, NULL, wlc);
4709
4710 /* turn off pll and xtal to match driver "down" state */ 4691 /* turn off pll and xtal to match driver "down" state */
4711 brcms_b_xtal(wlc_hw, OFF); 4692 brcms_b_xtal(wlc_hw, OFF);
4712 4693
@@ -4737,10 +4718,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
4737 goto fail; 4718 goto fail;
4738 } 4719 }
4739 4720
4740 BCMMSG(wlc->wiphy, 4721 BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
4741 "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n", 4722 wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih),
4742 wlc_hw->deviceid, wlc_hw->_nbands, 4723 macaddr);
4743 wlc_hw->sih->boardtype, macaddr);
4744 4724
4745 return err; 4725 return err;
4746 4726
@@ -4978,7 +4958,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
4978 * and per-port interrupt object may has been freed. this must 4958 * and per-port interrupt object may has been freed. this must
4979 * be done before sb core switch 4959 * be done before sb core switch
4980 */ 4960 */
4981 ai_deregister_intr_callback(wlc_hw->sih);
4982 ai_pci_sleep(wlc_hw->sih); 4961 ai_pci_sleep(wlc_hw->sih);
4983 } 4962 }
4984 4963
@@ -5073,13 +5052,11 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
5073 ai_pci_fixcfg(wlc_hw->sih); 5052 ai_pci_fixcfg(wlc_hw->sih);
5074 5053
5075 /* 5054 /*
5055 * TODO: test suspend/resume
5056 *
5076 * AI chip doesn't restore bar0win2 on 5057 * AI chip doesn't restore bar0win2 on
5077 * hibernation/resume, need sw fixup 5058 * hibernation/resume, need sw fixup
5078 */ 5059 */
5079 if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
5080 (wlc_hw->sih->chip == BCM43225_CHIP_ID))
5081 wlc_hw->regs = (struct d11regs __iomem *)
5082 ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
5083 5060
5084 /* 5061 /*
5085 * Inform phy that a POR reset has occurred so 5062 * Inform phy that a POR reset has occurred so
@@ -5091,7 +5068,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
5091 wlc_hw->wlc->pub->hw_up = true; 5068 wlc_hw->wlc->pub->hw_up = true;
5092 5069
5093 if ((wlc_hw->boardflags & BFL_FEM) 5070 if ((wlc_hw->boardflags & BFL_FEM)
5094 && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) { 5071 && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
5095 if (! 5072 if (!
5096 (wlc_hw->boardrev >= 0x1250 5073 (wlc_hw->boardrev >= 0x1250
5097 && (wlc_hw->boardflags & BFL_FEM_BT))) 5074 && (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -5186,7 +5163,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
5186 } 5163 }
5187 5164
5188 if ((wlc->pub->boardflags & BFL_FEM) 5165 if ((wlc->pub->boardflags & BFL_FEM)
5189 && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) { 5166 && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) {
5190 if (wlc->pub->boardrev >= 0x1250 5167 if (wlc->pub->boardrev >= 0x1250
5191 && (wlc->pub->boardflags & BFL_FEM_BT)) 5168 && (wlc->pub->boardflags & BFL_FEM_BT))
5192 brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL, 5169 brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
@@ -5323,9 +5300,9 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
5323 } else { 5300 } else {
5324 5301
5325 /* Reset and disable the core */ 5302 /* Reset and disable the core */
5326 if (ai_iscoreup(wlc_hw->sih)) { 5303 if (bcma_core_is_enabled(wlc_hw->d11core)) {
5327 if (R_REG(&wlc_hw->regs->maccontrol) & 5304 if (bcma_read32(wlc_hw->d11core,
5328 MCTL_EN_MAC) 5305 D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
5329 brcms_c_suspend_mac_and_wait(wlc_hw->wlc); 5306 brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
5330 callbacks += brcms_reset(wlc_hw->wlc->wl); 5307 callbacks += brcms_reset(wlc_hw->wlc->wl);
5331 brcms_c_coredisable(wlc_hw); 5308 brcms_c_coredisable(wlc_hw);
@@ -7482,11 +7459,11 @@ static void
7482brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr, 7459brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
7483 u32 *tsf_h_ptr) 7460 u32 *tsf_h_ptr)
7484{ 7461{
7485 struct d11regs __iomem *regs = wlc_hw->regs; 7462 struct bcma_device *core = wlc_hw->d11core;
7486 7463
7487 /* read the tsf timer low, then high to get an atomic read */ 7464 /* read the tsf timer low, then high to get an atomic read */
7488 *tsf_l_ptr = R_REG(&regs->tsf_timerlow); 7465 *tsf_l_ptr = bcma_read32(core, D11REGOFFS(tsf_timerlow));
7489 *tsf_h_ptr = R_REG(&regs->tsf_timerhigh); 7466 *tsf_h_ptr = bcma_read32(core, D11REGOFFS(tsf_timerhigh));
7490} 7467}
7491 7468
7492/* 7469/*
@@ -8074,14 +8051,8 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
8074 len = p->len; 8051 len = p->len;
8075 8052
8076 if (rxh->RxStatus1 & RXS_FCSERR) { 8053 if (rxh->RxStatus1 & RXS_FCSERR) {
8077 if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) { 8054 if (!(wlc->filter_flags & FIF_FCSFAIL))
8078 wiphy_err(wlc->wiphy, "FCSERR while scanning******* -"
8079 " tossing\n");
8080 goto toss;
8081 } else {
8082 wiphy_err(wlc->wiphy, "RCSERR!!!\n");
8083 goto toss; 8055 goto toss;
8084 }
8085 } 8056 }
8086 8057
8087 /* check received pkt has at least frame control field */ 8058 /* check received pkt has at least frame control field */
@@ -8165,7 +8136,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8165{ 8136{
8166 u32 macintstatus; 8137 u32 macintstatus;
8167 struct brcms_hardware *wlc_hw = wlc->hw; 8138 struct brcms_hardware *wlc_hw = wlc->hw;
8168 struct d11regs __iomem *regs = wlc_hw->regs; 8139 struct bcma_device *core = wlc_hw->d11core;
8169 struct wiphy *wiphy = wlc->wiphy; 8140 struct wiphy *wiphy = wlc->wiphy;
8170 8141
8171 if (brcms_deviceremoved(wlc)) { 8142 if (brcms_deviceremoved(wlc)) {
@@ -8201,7 +8172,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8201 /* ATIM window end */ 8172 /* ATIM window end */
8202 if (macintstatus & MI_ATIMWINEND) { 8173 if (macintstatus & MI_ATIMWINEND) {
8203 BCMMSG(wlc->wiphy, "end of ATIM window\n"); 8174 BCMMSG(wlc->wiphy, "end of ATIM window\n");
8204 OR_REG(&regs->maccommand, wlc->qvalid); 8175 bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid);
8205 wlc->qvalid = 0; 8176 wlc->qvalid = 0;
8206 } 8177 }
8207 8178
@@ -8219,17 +8190,17 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8219 8190
8220 if (macintstatus & MI_GP0) { 8191 if (macintstatus & MI_GP0) {
8221 wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d " 8192 wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d "
8222 "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now); 8193 "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
8223 8194
8224 printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n", 8195 printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
8225 __func__, wlc_hw->sih->chip, 8196 __func__, ai_get_chip_id(wlc_hw->sih),
8226 wlc_hw->sih->chiprev); 8197 ai_get_chiprev(wlc_hw->sih));
8227 brcms_fatal_error(wlc_hw->wlc->wl); 8198 brcms_fatal_error(wlc_hw->wlc->wl);
8228 } 8199 }
8229 8200
8230 /* gptimer timeout */ 8201 /* gptimer timeout */
8231 if (macintstatus & MI_TO) 8202 if (macintstatus & MI_TO)
8232 W_REG(&regs->gptimer, 0); 8203 bcma_write32(core, D11REGOFFS(gptimer), 0);
8233 8204
8234 if (macintstatus & MI_RFDISABLE) { 8205 if (macintstatus & MI_RFDISABLE) {
8235 BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the" 8206 BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
@@ -8251,13 +8222,11 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8251 8222
8252void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx) 8223void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
8253{ 8224{
8254 struct d11regs __iomem *regs; 8225 struct bcma_device *core = wlc->hw->d11core;
8255 u16 chanspec; 8226 u16 chanspec;
8256 8227
8257 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 8228 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
8258 8229
8259 regs = wlc->regs;
8260
8261 /* 8230 /*
8262 * This will happen if a big-hammer was executed. In 8231 * This will happen if a big-hammer was executed. In
8263 * that case, we want to go back to the channel that 8232 * that case, we want to go back to the channel that
@@ -8287,8 +8256,8 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
8287 * update since init path would reset 8256 * update since init path would reset
8288 * to default value 8257 * to default value
8289 */ 8258 */
8290 W_REG(&regs->tsf_cfprep, 8259 bcma_write32(core, D11REGOFFS(tsf_cfprep),
8291 (bi << CFPREP_CBI_SHIFT)); 8260 bi << CFPREP_CBI_SHIFT);
8292 8261
8293 /* Update maccontrol PM related bits */ 8262 /* Update maccontrol PM related bits */
8294 brcms_c_set_ps_ctrl(wlc); 8263 brcms_c_set_ps_ctrl(wlc);
@@ -8318,7 +8287,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
8318 brcms_c_bsinit(wlc); 8287 brcms_c_bsinit(wlc);
8319 8288
8320 /* Enable EDCF mode (while the MAC is suspended) */ 8289 /* Enable EDCF mode (while the MAC is suspended) */
8321 OR_REG(&regs->ifs_ctl, IFS_USEEDCF); 8290 bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF);
8322 brcms_c_edcf_setparams(wlc, false); 8291 brcms_c_edcf_setparams(wlc, false);
8323 8292
8324 /* Init precedence maps for empty FIFOs */ 8293 /* Init precedence maps for empty FIFOs */
@@ -8342,7 +8311,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
8342 brcms_c_txflowcontrol_reset(wlc); 8311 brcms_c_txflowcontrol_reset(wlc);
8343 8312
8344 /* enable the RF Disable Delay timer */ 8313 /* enable the RF Disable Delay timer */
8345 W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT); 8314 bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT);
8346 8315
8347 /* 8316 /*
8348 * Initialize WME parameters; if they haven't been set by some other 8317 * Initialize WME parameters; if they haven't been set by some other
@@ -8362,9 +8331,8 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
8362 * The common driver entry routine. Error codes should be unique 8331 * The common driver entry routine. Error codes should be unique
8363 */ 8332 */
8364struct brcms_c_info * 8333struct brcms_c_info *
8365brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit, 8334brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
8366 bool piomode, void __iomem *regsva, struct pci_dev *btparam, 8335 bool piomode, uint *perr)
8367 uint *perr)
8368{ 8336{
8369 struct brcms_c_info *wlc; 8337 struct brcms_c_info *wlc;
8370 uint err = 0; 8338 uint err = 0;
@@ -8372,7 +8340,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
8372 struct brcms_pub *pub; 8340 struct brcms_pub *pub;
8373 8341
8374 /* allocate struct brcms_c_info state and its substructures */ 8342 /* allocate struct brcms_c_info state and its substructures */
8375 wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device); 8343 wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0);
8376 if (wlc == NULL) 8344 if (wlc == NULL)
8377 goto fail; 8345 goto fail;
8378 wlc->wiphy = wl->wiphy; 8346 wlc->wiphy = wl->wiphy;
@@ -8399,8 +8367,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
8399 * low level attach steps(all hw accesses go 8367 * low level attach steps(all hw accesses go
8400 * inside, no more in rest of the attach) 8368 * inside, no more in rest of the attach)
8401 */ 8369 */
8402 err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva, 8370 err = brcms_b_attach(wlc, core, unit, piomode);
8403 btparam);
8404 if (err) 8371 if (err)
8405 goto fail; 8372 goto fail;
8406 8373
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index 251c350b3164..adb136ec1f04 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -334,7 +334,7 @@ struct brcms_hardware {
334 u32 machwcap_backup; /* backup of machwcap */ 334 u32 machwcap_backup; /* backup of machwcap */
335 335
336 struct si_pub *sih; /* SI handle (cookie for siutils calls) */ 336 struct si_pub *sih; /* SI handle (cookie for siutils calls) */
337 struct d11regs __iomem *regs; /* pointer to device registers */ 337 struct bcma_device *d11core; /* pointer to 802.11 core */
338 struct phy_shim_info *physhim; /* phy shim layer handler */ 338 struct phy_shim_info *physhim; /* phy shim layer handler */
339 struct shared_phy *phy_sh; /* pointer to shared phy state */ 339 struct shared_phy *phy_sh; /* pointer to shared phy state */
340 struct brcms_hw_band *band;/* pointer to active per-band state */ 340 struct brcms_hw_band *band;/* pointer to active per-band state */
@@ -400,7 +400,6 @@ struct brcms_txq_info {
400 * 400 *
401 * pub: pointer to driver public state. 401 * pub: pointer to driver public state.
402 * wl: pointer to specific private state. 402 * wl: pointer to specific private state.
403 * regs: pointer to device registers.
404 * hw: HW related state. 403 * hw: HW related state.
405 * clkreq_override: setting for clkreq for PCIE : Auto, 0, 1. 404 * clkreq_override: setting for clkreq for PCIE : Auto, 0, 1.
406 * fastpwrup_dly: time in us needed to bring up d11 fast clock. 405 * fastpwrup_dly: time in us needed to bring up d11 fast clock.
@@ -477,7 +476,6 @@ struct brcms_txq_info {
477struct brcms_c_info { 476struct brcms_c_info {
478 struct brcms_pub *pub; 477 struct brcms_pub *pub;
479 struct brcms_info *wl; 478 struct brcms_info *wl;
480 struct d11regs __iomem *regs;
481 struct brcms_hardware *hw; 479 struct brcms_hardware *hw;
482 480
483 /* clock */ 481 /* clock */
@@ -519,8 +517,7 @@ struct brcms_c_info {
519 struct brcms_timer *radio_timer; 517 struct brcms_timer *radio_timer;
520 518
521 /* promiscuous */ 519 /* promiscuous */
522 bool monitor; 520 uint filter_flags;
523 bool bcnmisc_monitor;
524 521
525 /* driver feature */ 522 /* driver feature */
526 bool _rifs; 523 bool _rifs;
@@ -658,8 +655,7 @@ extern void brcms_c_print_txdesc(struct d11txh *txh);
658#endif 655#endif
659 656
660extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config); 657extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
661extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, 658extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
662 bool promisc);
663extern void brcms_c_send_q(struct brcms_c_info *wlc); 659extern void brcms_c_send_q(struct brcms_c_info *wlc);
664extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu, 660extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
665 uint *fifo); 661 uint *fifo);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
index 0bcb26792046..7fad6dc19258 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
@@ -139,6 +139,9 @@
139#define SRSH_PI_MASK 0xf000 /* bit 15:12 */ 139#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
140#define SRSH_PI_SHIFT 12 /* bit 15:12 */ 140#define SRSH_PI_SHIFT 12 /* bit 15:12 */
141 141
142#define PCIREGOFFS(field) offsetof(struct sbpciregs, field)
143#define PCIEREGOFFS(field) offsetof(struct sbpcieregs, field)
144
142/* Sonics side: PCI core and host control registers */ 145/* Sonics side: PCI core and host control registers */
143struct sbpciregs { 146struct sbpciregs {
144 u32 control; /* PCI control */ 147 u32 control; /* PCI control */
@@ -205,11 +208,7 @@ struct sbpcieregs {
205}; 208};
206 209
207struct pcicore_info { 210struct pcicore_info {
208 union { 211 struct bcma_device *core;
209 struct sbpcieregs __iomem *pcieregs;
210 struct sbpciregs __iomem *pciregs;
211 } regs; /* Memory mapped register to the core */
212
213 struct si_pub *sih; /* System interconnect handle */ 212 struct si_pub *sih; /* System interconnect handle */
214 struct pci_dev *dev; 213 struct pci_dev *dev;
215 u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset 214 u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset
@@ -224,9 +223,9 @@ struct pcicore_info {
224}; 223};
225 224
226#define PCIE_ASPM(sih) \ 225#define PCIE_ASPM(sih) \
227 (((sih)->buscoretype == PCIE_CORE_ID) && \ 226 ((ai_get_buscoretype(sih) == PCIE_CORE_ID) && \
228 (((sih)->buscorerev >= 3) && \ 227 ((ai_get_buscorerev(sih) >= 3) && \
229 ((sih)->buscorerev <= 5))) 228 (ai_get_buscorerev(sih) <= 5)))
230 229
231 230
232/* delay needed between the mdio control/ mdiodata register data access */ 231/* delay needed between the mdio control/ mdiodata register data access */
@@ -238,8 +237,7 @@ static void pr28829_delay(void)
238/* Initialize the PCI core. 237/* Initialize the PCI core.
239 * It's caller's responsibility to make sure that this is done only once 238 * It's caller's responsibility to make sure that this is done only once
240 */ 239 */
241struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev, 240struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core)
242 void __iomem *regs)
243{ 241{
244 struct pcicore_info *pi; 242 struct pcicore_info *pi;
245 243
@@ -249,17 +247,15 @@ struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
249 return NULL; 247 return NULL;
250 248
251 pi->sih = sih; 249 pi->sih = sih;
252 pi->dev = pdev; 250 pi->dev = core->bus->host_pci;
251 pi->core = core;
253 252
254 if (sih->buscoretype == PCIE_CORE_ID) { 253 if (core->id.id == PCIE_CORE_ID) {
255 u8 cap_ptr; 254 u8 cap_ptr;
256 pi->regs.pcieregs = regs;
257 cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP, 255 cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
258 NULL, NULL); 256 NULL, NULL);
259 pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET; 257 pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
260 } else 258 }
261 pi->regs.pciregs = regs;
262
263 return pi; 259 return pi;
264} 260}
265 261
@@ -334,37 +330,37 @@ end:
334 330
335/* ***** Register Access API */ 331/* ***** Register Access API */
336static uint 332static uint
337pcie_readreg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset) 333pcie_readreg(struct bcma_device *core, uint addrtype, uint offset)
338{ 334{
339 uint retval = 0xFFFFFFFF; 335 uint retval = 0xFFFFFFFF;
340 336
341 switch (addrtype) { 337 switch (addrtype) {
342 case PCIE_CONFIGREGS: 338 case PCIE_CONFIGREGS:
343 W_REG(&pcieregs->configaddr, offset); 339 bcma_write32(core, PCIEREGOFFS(configaddr), offset);
344 (void)R_REG((&pcieregs->configaddr)); 340 (void)bcma_read32(core, PCIEREGOFFS(configaddr));
345 retval = R_REG(&pcieregs->configdata); 341 retval = bcma_read32(core, PCIEREGOFFS(configdata));
346 break; 342 break;
347 case PCIE_PCIEREGS: 343 case PCIE_PCIEREGS:
348 W_REG(&pcieregs->pcieindaddr, offset); 344 bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
349 (void)R_REG(&pcieregs->pcieindaddr); 345 (void)bcma_read32(core, PCIEREGOFFS(pcieindaddr));
350 retval = R_REG(&pcieregs->pcieinddata); 346 retval = bcma_read32(core, PCIEREGOFFS(pcieinddata));
351 break; 347 break;
352 } 348 }
353 349
354 return retval; 350 return retval;
355} 351}
356 352
357static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype, 353static uint pcie_writereg(struct bcma_device *core, uint addrtype,
358 uint offset, uint val) 354 uint offset, uint val)
359{ 355{
360 switch (addrtype) { 356 switch (addrtype) {
361 case PCIE_CONFIGREGS: 357 case PCIE_CONFIGREGS:
362 W_REG((&pcieregs->configaddr), offset); 358 bcma_write32(core, PCIEREGOFFS(configaddr), offset);
363 W_REG((&pcieregs->configdata), val); 359 bcma_write32(core, PCIEREGOFFS(configdata), val);
364 break; 360 break;
365 case PCIE_PCIEREGS: 361 case PCIE_PCIEREGS:
366 W_REG((&pcieregs->pcieindaddr), offset); 362 bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
367 W_REG((&pcieregs->pcieinddata), val); 363 bcma_write32(core, PCIEREGOFFS(pcieinddata), val);
368 break; 364 break;
369 default: 365 default:
370 break; 366 break;
@@ -374,7 +370,6 @@ static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
374 370
375static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk) 371static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
376{ 372{
377 struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
378 uint mdiodata, i = 0; 373 uint mdiodata, i = 0;
379 uint pcie_serdes_spinwait = 200; 374 uint pcie_serdes_spinwait = 200;
380 375
@@ -382,12 +377,13 @@ static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
382 (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) | 377 (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
383 (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) | 378 (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
384 (blk << 4)); 379 (blk << 4));
385 W_REG(&pcieregs->mdiodata, mdiodata); 380 bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
386 381
387 pr28829_delay(); 382 pr28829_delay();
388 /* retry till the transaction is complete */ 383 /* retry till the transaction is complete */
389 while (i < pcie_serdes_spinwait) { 384 while (i < pcie_serdes_spinwait) {
390 if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) 385 if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
386 MDIOCTL_ACCESS_DONE)
391 break; 387 break;
392 388
393 udelay(1000); 389 udelay(1000);
@@ -404,15 +400,15 @@ static int
404pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write, 400pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
405 uint *val) 401 uint *val)
406{ 402{
407 struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
408 uint mdiodata; 403 uint mdiodata;
409 uint i = 0; 404 uint i = 0;
410 uint pcie_serdes_spinwait = 10; 405 uint pcie_serdes_spinwait = 10;
411 406
412 /* enable mdio access to SERDES */ 407 /* enable mdio access to SERDES */
413 W_REG(&pcieregs->mdiocontrol, MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL); 408 bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol),
409 MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
414 410
415 if (pi->sih->buscorerev >= 10) { 411 if (ai_get_buscorerev(pi->sih) >= 10) {
416 /* new serdes is slower in rw, 412 /* new serdes is slower in rw,
417 * using two layers of reg address mapping 413 * using two layers of reg address mapping
418 */ 414 */
@@ -432,20 +428,22 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
432 mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA | 428 mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
433 *val); 429 *val);
434 430
435 W_REG(&pcieregs->mdiodata, mdiodata); 431 bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
436 432
437 pr28829_delay(); 433 pr28829_delay();
438 434
439 /* retry till the transaction is complete */ 435 /* retry till the transaction is complete */
440 while (i < pcie_serdes_spinwait) { 436 while (i < pcie_serdes_spinwait) {
441 if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) { 437 if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
438 MDIOCTL_ACCESS_DONE) {
442 if (!write) { 439 if (!write) {
443 pr28829_delay(); 440 pr28829_delay();
444 *val = (R_REG(&pcieregs->mdiodata) & 441 *val = (bcma_read32(pi->core,
442 PCIEREGOFFS(mdiodata)) &
445 MDIODATA_MASK); 443 MDIODATA_MASK);
446 } 444 }
447 /* Disable mdio access to SERDES */ 445 /* Disable mdio access to SERDES */
448 W_REG(&pcieregs->mdiocontrol, 0); 446 bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
449 return 0; 447 return 0;
450 } 448 }
451 udelay(1000); 449 udelay(1000);
@@ -453,7 +451,7 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
453 } 451 }
454 452
455 /* Timed out. Disable mdio access to SERDES. */ 453 /* Timed out. Disable mdio access to SERDES. */
456 W_REG(&pcieregs->mdiocontrol, 0); 454 bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
457 return 1; 455 return 1;
458} 456}
459 457
@@ -502,18 +500,18 @@ static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
502{ 500{
503 u32 w; 501 u32 w;
504 struct si_pub *sih = pi->sih; 502 struct si_pub *sih = pi->sih;
505 struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
506 503
507 if (sih->buscoretype != PCIE_CORE_ID || sih->buscorerev < 7) 504 if (ai_get_buscoretype(sih) != PCIE_CORE_ID ||
505 ai_get_buscorerev(sih) < 7)
508 return; 506 return;
509 507
510 w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); 508 w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
511 if (extend) 509 if (extend)
512 w |= PCIE_ASPMTIMER_EXTEND; 510 w |= PCIE_ASPMTIMER_EXTEND;
513 else 511 else
514 w &= ~PCIE_ASPMTIMER_EXTEND; 512 w &= ~PCIE_ASPMTIMER_EXTEND;
515 pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w); 513 pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
516 w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG); 514 w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
517} 515}
518 516
519/* centralized clkreq control policy */ 517/* centralized clkreq control policy */
@@ -527,25 +525,27 @@ static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
527 pcie_clkreq(pi, 1, 0); 525 pcie_clkreq(pi, 1, 0);
528 break; 526 break;
529 case SI_PCIDOWN: 527 case SI_PCIDOWN:
530 if (sih->buscorerev == 6) { /* turn on serdes PLL down */ 528 /* turn on serdes PLL down */
531 ai_corereg(sih, SI_CC_IDX, 529 if (ai_get_buscorerev(sih) == 6) {
532 offsetof(struct chipcregs, chipcontrol_addr), 530 ai_cc_reg(sih,
533 ~0, 0); 531 offsetof(struct chipcregs, chipcontrol_addr),
534 ai_corereg(sih, SI_CC_IDX, 532 ~0, 0);
535 offsetof(struct chipcregs, chipcontrol_data), 533 ai_cc_reg(sih,
536 ~0x40, 0); 534 offsetof(struct chipcregs, chipcontrol_data),
535 ~0x40, 0);
537 } else if (pi->pcie_pr42767) { 536 } else if (pi->pcie_pr42767) {
538 pcie_clkreq(pi, 1, 1); 537 pcie_clkreq(pi, 1, 1);
539 } 538 }
540 break; 539 break;
541 case SI_PCIUP: 540 case SI_PCIUP:
542 if (sih->buscorerev == 6) { /* turn off serdes PLL down */ 541 /* turn off serdes PLL down */
543 ai_corereg(sih, SI_CC_IDX, 542 if (ai_get_buscorerev(sih) == 6) {
544 offsetof(struct chipcregs, chipcontrol_addr), 543 ai_cc_reg(sih,
545 ~0, 0); 544 offsetof(struct chipcregs, chipcontrol_addr),
546 ai_corereg(sih, SI_CC_IDX, 545 ~0, 0);
547 offsetof(struct chipcregs, chipcontrol_data), 546 ai_cc_reg(sih,
548 ~0x40, 0x40); 547 offsetof(struct chipcregs, chipcontrol_data),
548 ~0x40, 0x40);
549 } else if (PCIE_ASPM(sih)) { /* disable clkreq */ 549 } else if (PCIE_ASPM(sih)) { /* disable clkreq */
550 pcie_clkreq(pi, 1, 0); 550 pcie_clkreq(pi, 1, 0);
551 } 551 }
@@ -562,7 +562,7 @@ static void pcie_war_polarity(struct pcicore_info *pi)
562 if (pi->pcie_polarity != 0) 562 if (pi->pcie_polarity != 0)
563 return; 563 return;
564 564
565 w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG); 565 w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
566 566
567 /* Detect the current polarity at attach and force that polarity and 567 /* Detect the current polarity at attach and force that polarity and
568 * disable changing the polarity 568 * disable changing the polarity
@@ -581,18 +581,15 @@ static void pcie_war_polarity(struct pcicore_info *pi)
581 */ 581 */
582static void pcie_war_aspm_clkreq(struct pcicore_info *pi) 582static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
583{ 583{
584 struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
585 struct si_pub *sih = pi->sih; 584 struct si_pub *sih = pi->sih;
586 u16 val16; 585 u16 val16;
587 u16 __iomem *reg16;
588 u32 w; 586 u32 w;
589 587
590 if (!PCIE_ASPM(sih)) 588 if (!PCIE_ASPM(sih))
591 return; 589 return;
592 590
593 /* bypass this on QT or VSIM */ 591 /* bypass this on QT or VSIM */
594 reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET]; 592 val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]));
595 val16 = R_REG(reg16);
596 593
597 val16 &= ~SRSH_ASPM_ENB; 594 val16 &= ~SRSH_ASPM_ENB;
598 if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB) 595 if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
@@ -602,15 +599,15 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
602 else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB) 599 else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
603 val16 |= SRSH_ASPM_L0s_ENB; 600 val16 |= SRSH_ASPM_L0s_ENB;
604 601
605 W_REG(reg16, val16); 602 bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16);
606 603
607 pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w); 604 pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
608 w &= ~PCIE_ASPM_ENAB; 605 w &= ~PCIE_ASPM_ENAB;
609 w |= pi->pcie_war_aspm_ovr; 606 w |= pi->pcie_war_aspm_ovr;
610 pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w); 607 pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
611 608
612 reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5]; 609 val16 = bcma_read16(pi->core,
613 val16 = R_REG(reg16); 610 PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]));
614 611
615 if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) { 612 if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
616 val16 |= SRSH_CLKREQ_ENB; 613 val16 |= SRSH_CLKREQ_ENB;
@@ -618,7 +615,8 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
618 } else 615 } else
619 val16 &= ~SRSH_CLKREQ_ENB; 616 val16 &= ~SRSH_CLKREQ_ENB;
620 617
621 W_REG(reg16, val16); 618 bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]),
619 val16);
622} 620}
623 621
624/* Apply the polarity determined at the start */ 622/* Apply the polarity determined at the start */
@@ -642,16 +640,15 @@ static void pcie_war_serdes(struct pcicore_info *pi)
642/* Needs to happen when coming out of 'standby'/'hibernate' */ 640/* Needs to happen when coming out of 'standby'/'hibernate' */
643static void pcie_misc_config_fixup(struct pcicore_info *pi) 641static void pcie_misc_config_fixup(struct pcicore_info *pi)
644{ 642{
645 struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
646 u16 val16; 643 u16 val16;
647 u16 __iomem *reg16;
648 644
649 reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG]; 645 val16 = bcma_read16(pi->core,
650 val16 = R_REG(reg16); 646 PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]));
651 647
652 if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) { 648 if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
653 val16 |= SRSH_L23READY_EXIT_NOPERST; 649 val16 |= SRSH_L23READY_EXIT_NOPERST;
654 W_REG(reg16, val16); 650 bcma_write16(pi->core,
651 PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16);
655 } 652 }
656} 653}
657 654
@@ -659,62 +656,57 @@ static void pcie_misc_config_fixup(struct pcicore_info *pi)
659/* Needs to happen when coming out of 'standby'/'hibernate' */ 656/* Needs to happen when coming out of 'standby'/'hibernate' */
660static void pcie_war_noplldown(struct pcicore_info *pi) 657static void pcie_war_noplldown(struct pcicore_info *pi)
661{ 658{
662 struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
663 u16 __iomem *reg16;
664
665 /* turn off serdes PLL down */ 659 /* turn off serdes PLL down */
666 ai_corereg(pi->sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol), 660 ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol),
667 CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN); 661 CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
668 662
669 /* clear srom shadow backdoor */ 663 /* clear srom shadow backdoor */
670 reg16 = &pcieregs->sprom[SRSH_BD_OFFSET]; 664 bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0);
671 W_REG(reg16, 0);
672} 665}
673 666
674/* Needs to happen when coming out of 'standby'/'hibernate' */ 667/* Needs to happen when coming out of 'standby'/'hibernate' */
675static void pcie_war_pci_setup(struct pcicore_info *pi) 668static void pcie_war_pci_setup(struct pcicore_info *pi)
676{ 669{
677 struct si_pub *sih = pi->sih; 670 struct si_pub *sih = pi->sih;
678 struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
679 u32 w; 671 u32 w;
680 672
681 if (sih->buscorerev == 0 || sih->buscorerev == 1) { 673 if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) {
682 w = pcie_readreg(pcieregs, PCIE_PCIEREGS, 674 w = pcie_readreg(pi->core, PCIE_PCIEREGS,
683 PCIE_TLP_WORKAROUNDSREG); 675 PCIE_TLP_WORKAROUNDSREG);
684 w |= 0x8; 676 w |= 0x8;
685 pcie_writereg(pcieregs, PCIE_PCIEREGS, 677 pcie_writereg(pi->core, PCIE_PCIEREGS,
686 PCIE_TLP_WORKAROUNDSREG, w); 678 PCIE_TLP_WORKAROUNDSREG, w);
687 } 679 }
688 680
689 if (sih->buscorerev == 1) { 681 if (ai_get_buscorerev(sih) == 1) {
690 w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG); 682 w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
691 w |= 0x40; 683 w |= 0x40;
692 pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w); 684 pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
693 } 685 }
694 686
695 if (sih->buscorerev == 0) { 687 if (ai_get_buscorerev(sih) == 0) {
696 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128); 688 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
697 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100); 689 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
698 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466); 690 pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
699 } else if (PCIE_ASPM(sih)) { 691 } else if (PCIE_ASPM(sih)) {
700 /* Change the L1 threshold for better performance */ 692 /* Change the L1 threshold for better performance */
701 w = pcie_readreg(pcieregs, PCIE_PCIEREGS, 693 w = pcie_readreg(pi->core, PCIE_PCIEREGS,
702 PCIE_DLLP_PMTHRESHREG); 694 PCIE_DLLP_PMTHRESHREG);
703 w &= ~PCIE_L1THRESHOLDTIME_MASK; 695 w &= ~PCIE_L1THRESHOLDTIME_MASK;
704 w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT; 696 w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
705 pcie_writereg(pcieregs, PCIE_PCIEREGS, 697 pcie_writereg(pi->core, PCIE_PCIEREGS,
706 PCIE_DLLP_PMTHRESHREG, w); 698 PCIE_DLLP_PMTHRESHREG, w);
707 699
708 pcie_war_serdes(pi); 700 pcie_war_serdes(pi);
709 701
710 pcie_war_aspm_clkreq(pi); 702 pcie_war_aspm_clkreq(pi);
711 } else if (pi->sih->buscorerev == 7) 703 } else if (ai_get_buscorerev(pi->sih) == 7)
712 pcie_war_noplldown(pi); 704 pcie_war_noplldown(pi);
713 705
714 /* Note that the fix is actually in the SROM, 706 /* Note that the fix is actually in the SROM,
715 * that's why this is open-ended 707 * that's why this is open-ended
716 */ 708 */
717 if (pi->sih->buscorerev >= 6) 709 if (ai_get_buscorerev(pi->sih) >= 6)
718 pcie_misc_config_fixup(pi); 710 pcie_misc_config_fixup(pi);
719} 711}
720 712
@@ -745,7 +737,7 @@ void pcicore_attach(struct pcicore_info *pi, int state)
745 737
746void pcicore_hwup(struct pcicore_info *pi) 738void pcicore_hwup(struct pcicore_info *pi)
747{ 739{
748 if (!pi || pi->sih->buscoretype != PCIE_CORE_ID) 740 if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
749 return; 741 return;
750 742
751 pcie_war_pci_setup(pi); 743 pcie_war_pci_setup(pi);
@@ -753,7 +745,7 @@ void pcicore_hwup(struct pcicore_info *pi)
753 745
754void pcicore_up(struct pcicore_info *pi, int state) 746void pcicore_up(struct pcicore_info *pi, int state)
755{ 747{
756 if (!pi || pi->sih->buscoretype != PCIE_CORE_ID) 748 if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
757 return; 749 return;
758 750
759 /* Restore L1 timer for better performance */ 751 /* Restore L1 timer for better performance */
@@ -781,7 +773,7 @@ void pcicore_sleep(struct pcicore_info *pi)
781 773
782void pcicore_down(struct pcicore_info *pi, int state) 774void pcicore_down(struct pcicore_info *pi, int state)
783{ 775{
784 if (!pi || pi->sih->buscoretype != PCIE_CORE_ID) 776 if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
785 return; 777 return;
786 778
787 pcie_clkreq_upd(pi, state); 779 pcie_clkreq_upd(pi, state);
@@ -790,46 +782,45 @@ void pcicore_down(struct pcicore_info *pi, int state)
790 pcie_extendL1timer(pi, false); 782 pcie_extendL1timer(pi, false);
791} 783}
792 784
793/* precondition: current core is sii->buscoretype */ 785void pcicore_fixcfg(struct pcicore_info *pi)
794static void pcicore_fixcfg(struct pcicore_info *pi, u16 __iomem *reg16)
795{ 786{
796 struct si_info *sii = (struct si_info *)(pi->sih); 787 struct bcma_device *core = pi->core;
797 u16 val16; 788 u16 val16;
798 uint pciidx; 789 uint regoff;
799 790
800 pciidx = ai_coreidx(&sii->pub); 791 switch (pi->core->id.id) {
801 val16 = R_REG(reg16); 792 case BCMA_CORE_PCI:
802 if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) { 793 regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]);
803 val16 = (u16)(pciidx << SRSH_PI_SHIFT) | 794 break;
804 (val16 & ~SRSH_PI_MASK);
805 W_REG(reg16, val16);
806 }
807}
808 795
809void 796 case BCMA_CORE_PCIE:
810pcicore_fixcfg_pci(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs) 797 regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]);
811{ 798 break;
812 pcicore_fixcfg(pi, &pciregs->sprom[SRSH_PI_OFFSET]);
813}
814 799
815void pcicore_fixcfg_pcie(struct pcicore_info *pi, 800 default:
816 struct sbpcieregs __iomem *pcieregs) 801 return;
817{ 802 }
818 pcicore_fixcfg(pi, &pcieregs->sprom[SRSH_PI_OFFSET]); 803
804 val16 = bcma_read16(pi->core, regoff);
805 if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) !=
806 (u16)core->core_index) {
807 val16 = ((u16)core->core_index << SRSH_PI_SHIFT) |
808 (val16 & ~SRSH_PI_MASK);
809 bcma_write16(pi->core, regoff, val16);
810 }
819} 811}
820 812
821/* precondition: current core is pci core */ 813/* precondition: current core is pci core */
822void 814void
823pcicore_pci_setup(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs) 815pcicore_pci_setup(struct pcicore_info *pi)
824{ 816{
825 u32 w; 817 bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
826 818 SBTOPCI_PREF | SBTOPCI_BURST);
827 OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST); 819
828 820 if (pi->core->id.rev >= 11) {
829 if (((struct si_info *)(pi->sih))->pub.buscorerev >= 11) { 821 bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
830 OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI); 822 SBTOPCI_RC_READMULTI);
831 w = R_REG(&pciregs->clkrun); 823 bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL);
832 W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL); 824 (void)bcma_read32(pi->core, PCIREGOFFS(clkrun));
833 w = R_REG(&pciregs->clkrun);
834 } 825 }
835} 826}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
index 58aa80dc3329..9fc3ead540a8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
@@ -62,8 +62,7 @@ struct sbpciregs;
62struct sbpcieregs; 62struct sbpcieregs;
63 63
64extern struct pcicore_info *pcicore_init(struct si_pub *sih, 64extern struct pcicore_info *pcicore_init(struct si_pub *sih,
65 struct pci_dev *pdev, 65 struct bcma_device *core);
66 void __iomem *regs);
67extern void pcicore_deinit(struct pcicore_info *pch); 66extern void pcicore_deinit(struct pcicore_info *pch);
68extern void pcicore_attach(struct pcicore_info *pch, int state); 67extern void pcicore_attach(struct pcicore_info *pch, int state);
69extern void pcicore_hwup(struct pcicore_info *pch); 68extern void pcicore_hwup(struct pcicore_info *pch);
@@ -72,11 +71,7 @@ extern void pcicore_sleep(struct pcicore_info *pch);
72extern void pcicore_down(struct pcicore_info *pch, int state); 71extern void pcicore_down(struct pcicore_info *pch, int state);
73extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id, 72extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
74 unsigned char *buf, u32 *buflen); 73 unsigned char *buf, u32 *buflen);
75extern void pcicore_fixcfg_pci(struct pcicore_info *pch, 74extern void pcicore_fixcfg(struct pcicore_info *pch);
76 struct sbpciregs __iomem *pciregs); 75extern void pcicore_pci_setup(struct pcicore_info *pch);
77extern void pcicore_fixcfg_pcie(struct pcicore_info *pch,
78 struct sbpcieregs __iomem *pciregs);
79extern void pcicore_pci_setup(struct pcicore_info *pch,
80 struct sbpciregs __iomem *pciregs);
81 76
82#endif /* _BRCM_NICPCI_H_ */ 77#endif /* _BRCM_NICPCI_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
index edf551561fd8..f1ca12625860 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/otp.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
@@ -77,7 +77,7 @@ struct otp_fn_s {
77}; 77};
78 78
79struct otpinfo { 79struct otpinfo {
80 uint ccrev; /* chipc revision */ 80 struct bcma_device *core; /* chipc core */
81 const struct otp_fn_s *fn; /* OTP functions */ 81 const struct otp_fn_s *fn; /* OTP functions */
82 struct si_pub *sih; /* Saved sb handle */ 82 struct si_pub *sih; /* Saved sb handle */
83 83
@@ -133,9 +133,10 @@ struct otpinfo {
133#define OTP_SZ_FU_144 (144/8) /* 144 bits */ 133#define OTP_SZ_FU_144 (144/8) /* 144 bits */
134 134
135static u16 135static u16
136ipxotp_otpr(struct otpinfo *oi, struct chipcregs __iomem *cc, uint wn) 136ipxotp_otpr(struct otpinfo *oi, uint wn)
137{ 137{
138 return R_REG(&cc->sromotp[wn]); 138 return bcma_read16(oi->core,
139 CHIPCREGOFFS(sromotp[wn]));
139} 140}
140 141
141/* 142/*
@@ -146,7 +147,7 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
146{ 147{
147 int ret = 0; 148 int ret = 0;
148 149
149 switch (sih->chip) { 150 switch (ai_get_chip_id(sih)) {
150 case BCM43224_CHIP_ID: 151 case BCM43224_CHIP_ID:
151 case BCM43225_CHIP_ID: 152 case BCM43225_CHIP_ID:
152 ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM; 153 ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
@@ -161,19 +162,21 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
161 return ret; 162 return ret;
162} 163}
163 164
164static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc) 165static void _ipxotp_init(struct otpinfo *oi)
165{ 166{
166 uint k; 167 uint k;
167 u32 otpp, st; 168 u32 otpp, st;
169 int ccrev = ai_get_ccrev(oi->sih);
170
168 171
169 /* 172 /*
170 * record word offset of General Use Region 173 * record word offset of General Use Region
171 * for various chipcommon revs 174 * for various chipcommon revs
172 */ 175 */
173 if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24 176 if (ccrev == 21 || ccrev == 24
174 || oi->sih->ccrev == 27) { 177 || ccrev == 27) {
175 oi->otpgu_base = REVA4_OTPGU_BASE; 178 oi->otpgu_base = REVA4_OTPGU_BASE;
176 } else if (oi->sih->ccrev == 36) { 179 } else if (ccrev == 36) {
177 /* 180 /*
178 * OTP size greater than equal to 2KB (128 words), 181 * OTP size greater than equal to 2KB (128 words),
179 * otpgu_base is similar to rev23 182 * otpgu_base is similar to rev23
@@ -182,7 +185,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
182 oi->otpgu_base = REVB8_OTPGU_BASE; 185 oi->otpgu_base = REVB8_OTPGU_BASE;
183 else 186 else
184 oi->otpgu_base = REV36_OTPGU_BASE; 187 oi->otpgu_base = REV36_OTPGU_BASE;
185 } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) { 188 } else if (ccrev == 23 || ccrev >= 25) {
186 oi->otpgu_base = REVB8_OTPGU_BASE; 189 oi->otpgu_base = REVB8_OTPGU_BASE;
187 } 190 }
188 191
@@ -190,24 +193,21 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
190 otpp = 193 otpp =
191 OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK); 194 OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
192 195
193 W_REG(&cc->otpprog, otpp); 196 bcma_write32(oi->core, CHIPCREGOFFS(otpprog), otpp);
194 for (k = 0; 197 st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
195 ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY) 198 for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++)
196 && (k < OTPP_TRIES); k++) 199 st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
197 ;
198 if (k >= OTPP_TRIES) 200 if (k >= OTPP_TRIES)
199 return; 201 return;
200 202
201 /* Read OTP lock bits and subregion programmed indication bits */ 203 /* Read OTP lock bits and subregion programmed indication bits */
202 oi->status = R_REG(&cc->otpstatus); 204 oi->status = bcma_read32(oi->core, CHIPCREGOFFS(otpstatus));
203 205
204 if ((oi->sih->chip == BCM43224_CHIP_ID) 206 if ((ai_get_chip_id(oi->sih) == BCM43224_CHIP_ID)
205 || (oi->sih->chip == BCM43225_CHIP_ID)) { 207 || (ai_get_chip_id(oi->sih) == BCM43225_CHIP_ID)) {
206 u32 p_bits; 208 u32 p_bits;
207 p_bits = 209 p_bits = (ipxotp_otpr(oi, oi->otpgu_base + OTPGU_P_OFF) &
208 (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) & 210 OTPGU_P_MSK) >> OTPGU_P_SHIFT;
209 OTPGU_P_MSK)
210 >> OTPGU_P_SHIFT;
211 oi->status |= (p_bits << OTPS_GUP_SHIFT); 211 oi->status |= (p_bits << OTPS_GUP_SHIFT);
212 } 212 }
213 213
@@ -220,7 +220,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
220 oi->hwlim = oi->wsize; 220 oi->hwlim = oi->wsize;
221 if (oi->status & OTPS_GUP_HW) { 221 if (oi->status & OTPS_GUP_HW) {
222 oi->hwlim = 222 oi->hwlim =
223 ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16; 223 ipxotp_otpr(oi, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
224 oi->swbase = oi->hwlim; 224 oi->swbase = oi->hwlim;
225 } else 225 } else
226 oi->swbase = oi->hwbase; 226 oi->swbase = oi->hwbase;
@@ -230,7 +230,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
230 230
231 if (oi->status & OTPS_GUP_SW) { 231 if (oi->status & OTPS_GUP_SW) {
232 oi->swlim = 232 oi->swlim =
233 ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16; 233 ipxotp_otpr(oi, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
234 oi->fbase = oi->swlim; 234 oi->fbase = oi->swlim;
235 } else 235 } else
236 oi->fbase = oi->swbase; 236 oi->fbase = oi->swbase;
@@ -240,11 +240,8 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
240 240
241static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi) 241static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
242{ 242{
243 uint idx;
244 struct chipcregs __iomem *cc;
245
246 /* Make sure we're running IPX OTP */ 243 /* Make sure we're running IPX OTP */
247 if (!OTPTYPE_IPX(sih->ccrev)) 244 if (!OTPTYPE_IPX(ai_get_ccrev(sih)))
248 return -EBADE; 245 return -EBADE;
249 246
250 /* Make sure OTP is not disabled */ 247 /* Make sure OTP is not disabled */
@@ -252,7 +249,7 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
252 return -EBADE; 249 return -EBADE;
253 250
254 /* Check for otp size */ 251 /* Check for otp size */
255 switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) { 252 switch ((ai_get_cccaps(sih) & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
256 case 0: 253 case 0:
257 /* Nothing there */ 254 /* Nothing there */
258 return -EBADE; 255 return -EBADE;
@@ -282,21 +279,13 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
282 } 279 }
283 280
284 /* Retrieve OTP region info */ 281 /* Retrieve OTP region info */
285 idx = ai_coreidx(sih); 282 _ipxotp_init(oi);
286 cc = ai_setcoreidx(sih, SI_CC_IDX);
287
288 _ipxotp_init(oi, cc);
289
290 ai_setcoreidx(sih, idx);
291
292 return 0; 283 return 0;
293} 284}
294 285
295static int 286static int
296ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen) 287ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
297{ 288{
298 uint idx;
299 struct chipcregs __iomem *cc;
300 uint base, i, sz; 289 uint base, i, sz;
301 290
302 /* Validate region selection */ 291 /* Validate region selection */
@@ -365,14 +354,10 @@ ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
365 return -EINVAL; 354 return -EINVAL;
366 } 355 }
367 356
368 idx = ai_coreidx(oi->sih);
369 cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
370
371 /* Read the data */ 357 /* Read the data */
372 for (i = 0; i < sz; i++) 358 for (i = 0; i < sz; i++)
373 data[i] = ipxotp_otpr(oi, cc, base + i); 359 data[i] = ipxotp_otpr(oi, base + i);
374 360
375 ai_setcoreidx(oi->sih, idx);
376 *wlen = sz; 361 *wlen = sz;
377 return 0; 362 return 0;
378} 363}
@@ -384,14 +369,13 @@ static const struct otp_fn_s ipxotp_fn = {
384 369
385static int otp_init(struct si_pub *sih, struct otpinfo *oi) 370static int otp_init(struct si_pub *sih, struct otpinfo *oi)
386{ 371{
387
388 int ret; 372 int ret;
389 373
390 memset(oi, 0, sizeof(struct otpinfo)); 374 memset(oi, 0, sizeof(struct otpinfo));
391 375
392 oi->ccrev = sih->ccrev; 376 oi->core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
393 377
394 if (OTPTYPE_IPX(oi->ccrev)) 378 if (OTPTYPE_IPX(ai_get_ccrev(sih)))
395 oi->fn = &ipxotp_fn; 379 oi->fn = &ipxotp_fn;
396 380
397 if (oi->fn == NULL) 381 if (oi->fn == NULL)
@@ -399,7 +383,7 @@ static int otp_init(struct si_pub *sih, struct otpinfo *oi)
399 383
400 oi->sih = sih; 384 oi->sih = sih;
401 385
402 ret = (oi->fn->init) (sih, oi); 386 ret = (oi->fn->init)(sih, oi);
403 387
404 return ret; 388 return ret;
405} 389}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index e17edf7e6833..264f8c4c703d 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -109,7 +109,7 @@ static const struct chan_info_basic chan_info_all[] = {
109 {204, 5020}, 109 {204, 5020},
110 {208, 5040}, 110 {208, 5040},
111 {212, 5060}, 111 {212, 5060},
112 {216, 50800} 112 {216, 5080}
113}; 113};
114 114
115static const u8 ofdm_rate_lookup[] = { 115static const u8 ofdm_rate_lookup[] = {
@@ -149,9 +149,8 @@ void wlc_radioreg_enter(struct brcms_phy_pub *pih)
149void wlc_radioreg_exit(struct brcms_phy_pub *pih) 149void wlc_radioreg_exit(struct brcms_phy_pub *pih)
150{ 150{
151 struct brcms_phy *pi = (struct brcms_phy *) pih; 151 struct brcms_phy *pi = (struct brcms_phy *) pih;
152 u16 dummy;
153 152
154 dummy = R_REG(&pi->regs->phyversion); 153 (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
155 pi->phy_wreg = 0; 154 pi->phy_wreg = 0;
156 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0); 155 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0);
157} 156}
@@ -186,11 +185,11 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
186 if ((D11REV_GE(pi->sh->corerev, 24)) || 185 if ((D11REV_GE(pi->sh->corerev, 24)) ||
187 (D11REV_IS(pi->sh->corerev, 22) 186 (D11REV_IS(pi->sh->corerev, 22)
188 && (pi->pubpi.phy_type != PHY_TYPE_SSN))) { 187 && (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
189 W_REG_FLUSH(&pi->regs->radioregaddr, addr); 188 bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
190 data = R_REG(&pi->regs->radioregdata); 189 data = bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
191 } else { 190 } else {
192 W_REG_FLUSH(&pi->regs->phy4waddr, addr); 191 bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
193 data = R_REG(&pi->regs->phy4wdatalo); 192 data = bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
194 } 193 }
195 pi->phy_wreg = 0; 194 pi->phy_wreg = 0;
196 195
@@ -203,15 +202,15 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
203 (D11REV_IS(pi->sh->corerev, 22) 202 (D11REV_IS(pi->sh->corerev, 22)
204 && (pi->pubpi.phy_type != PHY_TYPE_SSN))) { 203 && (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
205 204
206 W_REG_FLUSH(&pi->regs->radioregaddr, addr); 205 bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
207 W_REG(&pi->regs->radioregdata, val); 206 bcma_write16(pi->d11core, D11REGOFFS(radioregdata), val);
208 } else { 207 } else {
209 W_REG_FLUSH(&pi->regs->phy4waddr, addr); 208 bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
210 W_REG(&pi->regs->phy4wdatalo, val); 209 bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
211 } 210 }
212 211
213 if (++pi->phy_wreg >= pi->phy_wreg_limit) { 212 if (++pi->phy_wreg >= pi->phy_wreg_limit) {
214 (void)R_REG(&pi->regs->maccontrol); 213 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
215 pi->phy_wreg = 0; 214 pi->phy_wreg = 0;
216 } 215 }
217} 216}
@@ -223,19 +222,20 @@ static u32 read_radio_id(struct brcms_phy *pi)
223 if (D11REV_GE(pi->sh->corerev, 24)) { 222 if (D11REV_GE(pi->sh->corerev, 24)) {
224 u32 b0, b1, b2; 223 u32 b0, b1, b2;
225 224
226 W_REG_FLUSH(&pi->regs->radioregaddr, 0); 225 bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 0);
227 b0 = (u32) R_REG(&pi->regs->radioregdata); 226 b0 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
228 W_REG_FLUSH(&pi->regs->radioregaddr, 1); 227 bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 1);
229 b1 = (u32) R_REG(&pi->regs->radioregdata); 228 b1 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
230 W_REG_FLUSH(&pi->regs->radioregaddr, 2); 229 bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 2);
231 b2 = (u32) R_REG(&pi->regs->radioregdata); 230 b2 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
232 231
233 id = ((b0 & 0xf) << 28) | (((b2 << 8) | b1) << 12) | ((b0 >> 4) 232 id = ((b0 & 0xf) << 28) | (((b2 << 8) | b1) << 12) | ((b0 >> 4)
234 & 0xf); 233 & 0xf);
235 } else { 234 } else {
236 W_REG_FLUSH(&pi->regs->phy4waddr, RADIO_IDCODE); 235 bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), RADIO_IDCODE);
237 id = (u32) R_REG(&pi->regs->phy4wdatalo); 236 id = (u32) bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
238 id |= (u32) R_REG(&pi->regs->phy4wdatahi) << 16; 237 id |= (u32) bcma_read16(pi->d11core,
238 D11REGOFFS(phy4wdatahi)) << 16;
239 } 239 }
240 pi->phy_wreg = 0; 240 pi->phy_wreg = 0;
241 return id; 241 return id;
@@ -275,75 +275,52 @@ void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
275 275
276void write_phy_channel_reg(struct brcms_phy *pi, uint val) 276void write_phy_channel_reg(struct brcms_phy *pi, uint val)
277{ 277{
278 W_REG(&pi->regs->phychannel, val); 278 bcma_write16(pi->d11core, D11REGOFFS(phychannel), val);
279} 279}
280 280
281u16 read_phy_reg(struct brcms_phy *pi, u16 addr) 281u16 read_phy_reg(struct brcms_phy *pi, u16 addr)
282{ 282{
283 struct d11regs __iomem *regs; 283 bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
284
285 regs = pi->regs;
286
287 W_REG_FLUSH(&regs->phyregaddr, addr);
288 284
289 pi->phy_wreg = 0; 285 pi->phy_wreg = 0;
290 return R_REG(&regs->phyregdata); 286 return bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
291} 287}
292 288
293void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val) 289void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
294{ 290{
295 struct d11regs __iomem *regs;
296
297 regs = pi->regs;
298
299#ifdef CONFIG_BCM47XX 291#ifdef CONFIG_BCM47XX
300 W_REG_FLUSH(&regs->phyregaddr, addr); 292 bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
301 W_REG(&regs->phyregdata, val); 293 bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val);
302 if (addr == 0x72) 294 if (addr == 0x72)
303 (void)R_REG(&regs->phyregdata); 295 (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
304#else 296#else
305 W_REG((u32 __iomem *)(&regs->phyregaddr), addr | (val << 16)); 297 bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
306 if (++pi->phy_wreg >= pi->phy_wreg_limit) { 298 if (++pi->phy_wreg >= pi->phy_wreg_limit) {
307 pi->phy_wreg = 0; 299 pi->phy_wreg = 0;
308 (void)R_REG(&regs->phyversion); 300 (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
309 } 301 }
310#endif 302#endif
311} 303}
312 304
313void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val) 305void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
314{ 306{
315 struct d11regs __iomem *regs; 307 bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
316 308 bcma_mask16(pi->d11core, D11REGOFFS(phyregdata), val);
317 regs = pi->regs;
318
319 W_REG_FLUSH(&regs->phyregaddr, addr);
320
321 W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) & val));
322 pi->phy_wreg = 0; 309 pi->phy_wreg = 0;
323} 310}
324 311
325void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val) 312void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
326{ 313{
327 struct d11regs __iomem *regs; 314 bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
328 315 bcma_set16(pi->d11core, D11REGOFFS(phyregdata), val);
329 regs = pi->regs;
330
331 W_REG_FLUSH(&regs->phyregaddr, addr);
332
333 W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) | val));
334 pi->phy_wreg = 0; 316 pi->phy_wreg = 0;
335} 317}
336 318
337void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val) 319void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
338{ 320{
339 struct d11regs __iomem *regs; 321 val &= mask;
340 322 bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
341 regs = pi->regs; 323 bcma_maskset16(pi->d11core, D11REGOFFS(phyregdata), ~mask, val);
342
343 W_REG_FLUSH(&regs->phyregaddr, addr);
344
345 W_REG(&regs->phyregdata,
346 ((R_REG(&regs->phyregdata) & ~mask) | (val & mask)));
347 pi->phy_wreg = 0; 324 pi->phy_wreg = 0;
348} 325}
349 326
@@ -404,10 +381,8 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
404 sh->sromrev = shp->sromrev; 381 sh->sromrev = shp->sromrev;
405 sh->boardtype = shp->boardtype; 382 sh->boardtype = shp->boardtype;
406 sh->boardrev = shp->boardrev; 383 sh->boardrev = shp->boardrev;
407 sh->boardvendor = shp->boardvendor;
408 sh->boardflags = shp->boardflags; 384 sh->boardflags = shp->boardflags;
409 sh->boardflags2 = shp->boardflags2; 385 sh->boardflags2 = shp->boardflags2;
410 sh->buscorerev = shp->buscorerev;
411 386
412 sh->fast_timer = PHY_SW_TIMER_FAST; 387 sh->fast_timer = PHY_SW_TIMER_FAST;
413 sh->slow_timer = PHY_SW_TIMER_SLOW; 388 sh->slow_timer = PHY_SW_TIMER_SLOW;
@@ -450,7 +425,7 @@ static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi)
450} 425}
451 426
452struct brcms_phy_pub * 427struct brcms_phy_pub *
453wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs, 428wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
454 int bandtype, struct wiphy *wiphy) 429 int bandtype, struct wiphy *wiphy)
455{ 430{
456 struct brcms_phy *pi; 431 struct brcms_phy *pi;
@@ -462,7 +437,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
462 if (D11REV_IS(sh->corerev, 4)) 437 if (D11REV_IS(sh->corerev, 4))
463 sflags = SISF_2G_PHY | SISF_5G_PHY; 438 sflags = SISF_2G_PHY | SISF_5G_PHY;
464 else 439 else
465 sflags = ai_core_sflags(sh->sih, 0, 0); 440 sflags = bcma_aread32(d11core, BCMA_IOST);
466 441
467 if (bandtype == BRCM_BAND_5G) { 442 if (bandtype == BRCM_BAND_5G) {
468 if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0) 443 if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0)
@@ -480,7 +455,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
480 if (pi == NULL) 455 if (pi == NULL)
481 return NULL; 456 return NULL;
482 pi->wiphy = wiphy; 457 pi->wiphy = wiphy;
483 pi->regs = regs; 458 pi->d11core = d11core;
484 pi->sh = sh; 459 pi->sh = sh;
485 pi->phy_init_por = true; 460 pi->phy_init_por = true;
486 pi->phy_wreg_limit = PHY_WREG_LIMIT; 461 pi->phy_wreg_limit = PHY_WREG_LIMIT;
@@ -495,7 +470,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
495 pi->pubpi.coreflags = SICF_GMODE; 470 pi->pubpi.coreflags = SICF_GMODE;
496 471
497 wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags); 472 wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags);
498 phyversion = R_REG(&pi->regs->phyversion); 473 phyversion = bcma_read16(pi->d11core, D11REGOFFS(phyversion));
499 474
500 pi->pubpi.phy_type = PHY_TYPE(phyversion); 475 pi->pubpi.phy_type = PHY_TYPE(phyversion);
501 pi->pubpi.phy_rev = phyversion & PV_PV_MASK; 476 pi->pubpi.phy_rev = phyversion & PV_PV_MASK;
@@ -507,8 +482,8 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
507 pi->pubpi.phy_corenum = PHY_CORE_NUM_2; 482 pi->pubpi.phy_corenum = PHY_CORE_NUM_2;
508 pi->pubpi.ana_rev = (phyversion & PV_AV_MASK) >> PV_AV_SHIFT; 483 pi->pubpi.ana_rev = (phyversion & PV_AV_MASK) >> PV_AV_SHIFT;
509 484
510 if (!pi->pubpi.phy_type == PHY_TYPE_N && 485 if (pi->pubpi.phy_type != PHY_TYPE_N &&
511 !pi->pubpi.phy_type == PHY_TYPE_LCN) 486 pi->pubpi.phy_type != PHY_TYPE_LCN)
512 goto err; 487 goto err;
513 488
514 if (bandtype == BRCM_BAND_5G) { 489 if (bandtype == BRCM_BAND_5G) {
@@ -779,14 +754,14 @@ void wlc_phy_init(struct brcms_phy_pub *pih, u16 chanspec)
779 754
780 pi->radio_chanspec = chanspec; 755 pi->radio_chanspec = chanspec;
781 756
782 mc = R_REG(&pi->regs->maccontrol); 757 mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
783 if (WARN(mc & MCTL_EN_MAC, "HW error MAC running on init")) 758 if (WARN(mc & MCTL_EN_MAC, "HW error MAC running on init"))
784 return; 759 return;
785 760
786 if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN)) 761 if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN))
787 pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC; 762 pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
788 763
789 if (WARN(!(ai_core_sflags(pi->sh->sih, 0, 0) & SISF_FCLKA), 764 if (WARN(!(bcma_aread32(pi->d11core, BCMA_IOST) & SISF_FCLKA),
790 "HW error SISF_FCLKA\n")) 765 "HW error SISF_FCLKA\n"))
791 return; 766 return;
792 767
@@ -825,8 +800,8 @@ void wlc_phy_cal_init(struct brcms_phy_pub *pih)
825 struct brcms_phy *pi = (struct brcms_phy *) pih; 800 struct brcms_phy *pi = (struct brcms_phy *) pih;
826 void (*cal_init)(struct brcms_phy *) = NULL; 801 void (*cal_init)(struct brcms_phy *) = NULL;
827 802
828 if (WARN((R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) != 0, 803 if (WARN((bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
829 "HW error: MAC enabled during phy cal\n")) 804 MCTL_EN_MAC) != 0, "HW error: MAC enabled during phy cal\n"))
830 return; 805 return;
831 806
832 if (!pi->initialized) { 807 if (!pi->initialized) {
@@ -1017,7 +992,7 @@ wlc_phy_init_radio_regs(struct brcms_phy *pi,
1017void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on) 992void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
1018{ 993{
1019#define DUMMY_PKT_LEN 20 994#define DUMMY_PKT_LEN 20
1020 struct d11regs __iomem *regs = pi->regs; 995 struct bcma_device *core = pi->d11core;
1021 int i, count; 996 int i, count;
1022 u8 ofdmpkt[DUMMY_PKT_LEN] = { 997 u8 ofdmpkt[DUMMY_PKT_LEN] = {
1023 0xcc, 0x01, 0x02, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00, 998 0xcc, 0x01, 0x02, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00,
@@ -1033,26 +1008,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
1033 wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN, 1008 wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN,
1034 dummypkt); 1009 dummypkt);
1035 1010
1036 W_REG(&regs->xmtsel, 0); 1011 bcma_write16(core, D11REGOFFS(xmtsel), 0);
1037 1012
1038 if (D11REV_GE(pi->sh->corerev, 11)) 1013 if (D11REV_GE(pi->sh->corerev, 11))
1039 W_REG(&regs->wepctl, 0x100); 1014 bcma_write16(core, D11REGOFFS(wepctl), 0x100);
1040 else 1015 else
1041 W_REG(&regs->wepctl, 0); 1016 bcma_write16(core, D11REGOFFS(wepctl), 0);
1042 1017
1043 W_REG(&regs->txe_phyctl, (ofdm ? 1 : 0) | PHY_TXC_ANT_0); 1018 bcma_write16(core, D11REGOFFS(txe_phyctl),
1019 (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
1044 if (ISNPHY(pi) || ISLCNPHY(pi)) 1020 if (ISNPHY(pi) || ISLCNPHY(pi))
1045 W_REG(&regs->txe_phyctl1, 0x1A02); 1021 bcma_write16(core, D11REGOFFS(txe_phyctl1), 0x1A02);
1046 1022
1047 W_REG(&regs->txe_wm_0, 0); 1023 bcma_write16(core, D11REGOFFS(txe_wm_0), 0);
1048 W_REG(&regs->txe_wm_1, 0); 1024 bcma_write16(core, D11REGOFFS(txe_wm_1), 0);
1049 1025
1050 W_REG(&regs->xmttplatetxptr, 0); 1026 bcma_write16(core, D11REGOFFS(xmttplatetxptr), 0);
1051 W_REG(&regs->xmttxcnt, DUMMY_PKT_LEN); 1027 bcma_write16(core, D11REGOFFS(xmttxcnt), DUMMY_PKT_LEN);
1052 1028
1053 W_REG(&regs->xmtsel, ((8 << 8) | (1 << 5) | (1 << 2) | 2)); 1029 bcma_write16(core, D11REGOFFS(xmtsel),
1030 ((8 << 8) | (1 << 5) | (1 << 2) | 2));
1054 1031
1055 W_REG(&regs->txe_ctl, 0); 1032 bcma_write16(core, D11REGOFFS(txe_ctl), 0);
1056 1033
1057 if (!pa_on) { 1034 if (!pa_on) {
1058 if (ISNPHY(pi)) 1035 if (ISNPHY(pi))
@@ -1060,27 +1037,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
1060 } 1037 }
1061 1038
1062 if (ISNPHY(pi) || ISLCNPHY(pi)) 1039 if (ISNPHY(pi) || ISLCNPHY(pi))
1063 W_REG(&regs->txe_aux, 0xD0); 1040 bcma_write16(core, D11REGOFFS(txe_aux), 0xD0);
1064 else 1041 else
1065 W_REG(&regs->txe_aux, ((1 << 5) | (1 << 4))); 1042 bcma_write16(core, D11REGOFFS(txe_aux), ((1 << 5) | (1 << 4)));
1066 1043
1067 (void)R_REG(&regs->txe_aux); 1044 (void)bcma_read16(core, D11REGOFFS(txe_aux));
1068 1045
1069 i = 0; 1046 i = 0;
1070 count = ofdm ? 30 : 250; 1047 count = ofdm ? 30 : 250;
1071 while ((i++ < count) 1048 while ((i++ < count)
1072 && (R_REG(&regs->txe_status) & (1 << 7))) 1049 && (bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 7)))
1073 udelay(10); 1050 udelay(10);
1074 1051
1075 i = 0; 1052 i = 0;
1076 1053
1077 while ((i++ < 10) 1054 while ((i++ < 10) &&
1078 && ((R_REG(&regs->txe_status) & (1 << 10)) == 0)) 1055 ((bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 10)) == 0))
1079 udelay(10); 1056 udelay(10);
1080 1057
1081 i = 0; 1058 i = 0;
1082 1059
1083 while ((i++ < 10) && ((R_REG(&regs->ifsstat) & (1 << 8)))) 1060 while ((i++ < 10) &&
1061 ((bcma_read16(core, D11REGOFFS(ifsstat)) & (1 << 8))))
1084 udelay(10); 1062 udelay(10);
1085 1063
1086 if (!pa_on) { 1064 if (!pa_on) {
@@ -1137,7 +1115,7 @@ static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi)
1137void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on) 1115void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on)
1138{ 1116{
1139 struct brcms_phy *pi = (struct brcms_phy *) pih; 1117 struct brcms_phy *pi = (struct brcms_phy *) pih;
1140 (void)R_REG(&pi->regs->maccontrol); 1118 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
1141 1119
1142 if (ISNPHY(pi)) { 1120 if (ISNPHY(pi)) {
1143 wlc_phy_switch_radio_nphy(pi, on); 1121 wlc_phy_switch_radio_nphy(pi, on);
@@ -1377,7 +1355,7 @@ void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
1377 memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM], 1355 memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM],
1378 &txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM); 1356 &txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM);
1379 1357
1380 if (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) 1358 if (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
1381 mac_enabled = true; 1359 mac_enabled = true;
1382 1360
1383 if (mac_enabled) 1361 if (mac_enabled)
@@ -1407,7 +1385,8 @@ int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override)
1407 if (!SCAN_INPROG_PHY(pi)) { 1385 if (!SCAN_INPROG_PHY(pi)) {
1408 bool suspend; 1386 bool suspend;
1409 1387
1410 suspend = (0 == (R_REG(&pi->regs->maccontrol) & 1388 suspend = (0 == (bcma_read32(pi->d11core,
1389 D11REGOFFS(maccontrol)) &
1411 MCTL_EN_MAC)); 1390 MCTL_EN_MAC));
1412 1391
1413 if (!suspend) 1392 if (!suspend)
@@ -1860,18 +1839,17 @@ void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end)
1860 1839
1861 if (NREV_IS(pi->pubpi.phy_rev, 3) 1840 if (NREV_IS(pi->pubpi.phy_rev, 3)
1862 || NREV_IS(pi->pubpi.phy_rev, 4)) { 1841 || NREV_IS(pi->pubpi.phy_rev, 4)) {
1863 W_REG(&pi->regs->phyregaddr, 0xa0); 1842 bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
1864 (void)R_REG(&pi->regs->phyregaddr); 1843 0xa0);
1865 rxc = R_REG(&pi->regs->phyregdata); 1844 bcma_set16(pi->d11core, D11REGOFFS(phyregdata),
1866 W_REG(&pi->regs->phyregdata, 1845 0x1 << 15);
1867 (0x1 << 15) | rxc);
1868 } 1846 }
1869 } else { 1847 } else {
1870 if (NREV_IS(pi->pubpi.phy_rev, 3) 1848 if (NREV_IS(pi->pubpi.phy_rev, 3)
1871 || NREV_IS(pi->pubpi.phy_rev, 4)) { 1849 || NREV_IS(pi->pubpi.phy_rev, 4)) {
1872 W_REG(&pi->regs->phyregaddr, 0xa0); 1850 bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
1873 (void)R_REG(&pi->regs->phyregaddr); 1851 0xa0);
1874 W_REG(&pi->regs->phyregdata, rxc); 1852 bcma_write16(pi->d11core, D11REGOFFS(phyregdata), rxc);
1875 } 1853 }
1876 1854
1877 wlc_phy_por_inform(ppi); 1855 wlc_phy_por_inform(ppi);
@@ -1991,7 +1969,9 @@ void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl)
1991 pi->txpwrctrl = hwpwrctrl; 1969 pi->txpwrctrl = hwpwrctrl;
1992 1970
1993 if (ISNPHY(pi)) { 1971 if (ISNPHY(pi)) {
1994 suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); 1972 suspend = (0 == (bcma_read32(pi->d11core,
1973 D11REGOFFS(maccontrol)) &
1974 MCTL_EN_MAC));
1995 if (!suspend) 1975 if (!suspend)
1996 wlapi_suspend_mac_and_wait(pi->sh->physhim); 1976 wlapi_suspend_mac_and_wait(pi->sh->physhim);
1997 1977
@@ -2193,7 +2173,8 @@ void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val)
2193 if (!pi->sh->clk) 2173 if (!pi->sh->clk)
2194 return; 2174 return;
2195 2175
2196 suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); 2176 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
2177 MCTL_EN_MAC));
2197 if (!suspend) 2178 if (!suspend)
2198 wlapi_suspend_mac_and_wait(pi->sh->physhim); 2179 wlapi_suspend_mac_and_wait(pi->sh->physhim);
2199 2180
@@ -2411,8 +2392,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
2411 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); 2392 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
2412 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); 2393 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
2413 2394
2414 OR_REG(&pi->regs->maccommand, 2395 bcma_set32(pi->d11core, D11REGOFFS(maccommand),
2415 MCMD_BG_NOISE); 2396 MCMD_BG_NOISE);
2416 } else { 2397 } else {
2417 wlapi_suspend_mac_and_wait(pi->sh->physhim); 2398 wlapi_suspend_mac_and_wait(pi->sh->physhim);
2418 wlc_lcnphy_deaf_mode(pi, (bool) 0); 2399 wlc_lcnphy_deaf_mode(pi, (bool) 0);
@@ -2430,8 +2411,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
2430 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); 2411 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
2431 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); 2412 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
2432 2413
2433 OR_REG(&pi->regs->maccommand, 2414 bcma_set32(pi->d11core, D11REGOFFS(maccommand),
2434 MCMD_BG_NOISE); 2415 MCMD_BG_NOISE);
2435 } else { 2416 } else {
2436 struct phy_iq_est est[PHY_CORE_MAX]; 2417 struct phy_iq_est est[PHY_CORE_MAX];
2437 u32 cmplx_pwr[PHY_CORE_MAX]; 2418 u32 cmplx_pwr[PHY_CORE_MAX];
@@ -2924,29 +2905,29 @@ void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
2924 mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2); 2905 mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2);
2925 2906
2926 } 2907 }
2927 ai_corereg(pi->sh->sih, SI_CC_IDX, 2908 ai_cc_reg(pi->sh->sih,
2928 offsetof(struct chipcregs, gpiocontrol), 2909 offsetof(struct chipcregs, gpiocontrol),
2929 ~0x0, 0x0); 2910 ~0x0, 0x0);
2930 ai_corereg(pi->sh->sih, SI_CC_IDX, 2911 ai_cc_reg(pi->sh->sih,
2931 offsetof(struct chipcregs, gpioout), 0x40, 2912 offsetof(struct chipcregs, gpioout),
2932 0x40); 2913 0x40, 0x40);
2933 ai_corereg(pi->sh->sih, SI_CC_IDX, 2914 ai_cc_reg(pi->sh->sih,
2934 offsetof(struct chipcregs, gpioouten), 0x40, 2915 offsetof(struct chipcregs, gpioouten),
2935 0x40); 2916 0x40, 0x40);
2936 } else { 2917 } else {
2937 mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2); 2918 mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2);
2938 2919
2939 mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2); 2920 mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2);
2940 2921
2941 ai_corereg(pi->sh->sih, SI_CC_IDX, 2922 ai_cc_reg(pi->sh->sih,
2942 offsetof(struct chipcregs, gpioout), 0x40, 2923 offsetof(struct chipcregs, gpioout),
2943 0x00); 2924 0x40, 0x00);
2944 ai_corereg(pi->sh->sih, SI_CC_IDX, 2925 ai_cc_reg(pi->sh->sih,
2945 offsetof(struct chipcregs, gpioouten), 0x40, 2926 offsetof(struct chipcregs, gpioouten),
2946 0x0); 2927 0x40, 0x0);
2947 ai_corereg(pi->sh->sih, SI_CC_IDX, 2928 ai_cc_reg(pi->sh->sih,
2948 offsetof(struct chipcregs, gpiocontrol), 2929 offsetof(struct chipcregs, gpiocontrol),
2949 ~0x0, 0x40); 2930 ~0x0, 0x40);
2950 } 2931 }
2951 } 2932 }
2952} 2933}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index 96e15163222b..e34a71e7d242 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -166,7 +166,6 @@ struct shared_phy_params {
166 struct phy_shim_info *physhim; 166 struct phy_shim_info *physhim;
167 uint unit; 167 uint unit;
168 uint corerev; 168 uint corerev;
169 uint buscorerev;
170 u16 vid; 169 u16 vid;
171 u16 did; 170 u16 did;
172 uint chip; 171 uint chip;
@@ -175,7 +174,6 @@ struct shared_phy_params {
175 uint sromrev; 174 uint sromrev;
176 uint boardtype; 175 uint boardtype;
177 uint boardrev; 176 uint boardrev;
178 uint boardvendor;
179 u32 boardflags; 177 u32 boardflags;
180 u32 boardflags2; 178 u32 boardflags2;
181}; 179};
@@ -183,7 +181,7 @@ struct shared_phy_params {
183 181
184extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp); 182extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
185extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh, 183extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
186 struct d11regs __iomem *regs, 184 struct bcma_device *d11core,
187 int bandtype, struct wiphy *wiphy); 185 int bandtype, struct wiphy *wiphy);
188extern void wlc_phy_detach(struct brcms_phy_pub *ppi); 186extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
189 187
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index 5f9478b1c993..af00e2c2b266 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -503,10 +503,8 @@ struct shared_phy {
503 uint sromrev; 503 uint sromrev;
504 uint boardtype; 504 uint boardtype;
505 uint boardrev; 505 uint boardrev;
506 uint boardvendor;
507 u32 boardflags; 506 u32 boardflags;
508 u32 boardflags2; 507 u32 boardflags2;
509 uint buscorerev;
510 uint fast_timer; 508 uint fast_timer;
511 uint slow_timer; 509 uint slow_timer;
512 uint glacial_timer; 510 uint glacial_timer;
@@ -559,7 +557,7 @@ struct brcms_phy {
559 } u; 557 } u;
560 bool user_txpwr_at_rfport; 558 bool user_txpwr_at_rfport;
561 559
562 struct d11regs __iomem *regs; 560 struct bcma_device *d11core;
563 struct brcms_phy *next; 561 struct brcms_phy *next;
564 struct brcms_phy_pub pubpi; 562 struct brcms_phy_pub pubpi;
565 563
@@ -1090,7 +1088,7 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
1090 1088
1091#define BRCMS_PHY_WAR_PR51571(pi) \ 1089#define BRCMS_PHY_WAR_PR51571(pi) \
1092 if (NREV_LT((pi)->pubpi.phy_rev, 3)) \ 1090 if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
1093 (void)R_REG(&(pi)->regs->maccontrol) 1091 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
1094 1092
1095extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype); 1093extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
1096extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi); 1094extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index a63aa99d9810..efa0142bdad5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -2813,10 +2813,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
2813 u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; 2813 u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
2814 u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; 2814 u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
2815 idleTssi = read_phy_reg(pi, 0x4ab); 2815 idleTssi = read_phy_reg(pi, 0x4ab);
2816 suspend = 2816 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
2817 (0 == 2817 MCTL_EN_MAC));
2818 (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) &
2819 MCTL_EN_MAC));
2820 if (!suspend) 2818 if (!suspend)
2821 wlapi_suspend_mac_and_wait(pi->sh->physhim); 2819 wlapi_suspend_mac_and_wait(pi->sh->physhim);
2822 wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); 2820 wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
@@ -2890,7 +2888,8 @@ static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode)
2890 2888
2891 for (i = 0; i < 14; i++) 2889 for (i = 0; i < 14; i++)
2892 values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]); 2890 values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]);
2893 suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); 2891 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
2892 MCTL_EN_MAC));
2894 if (!suspend) 2893 if (!suspend)
2895 wlapi_suspend_mac_and_wait(pi->sh->physhim); 2894 wlapi_suspend_mac_and_wait(pi->sh->physhim);
2896 save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4); 2895 save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4);
@@ -3016,8 +3015,8 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
3016 bool suspend; 3015 bool suspend;
3017 struct brcms_phy *pi = (struct brcms_phy *) ppi; 3016 struct brcms_phy *pi = (struct brcms_phy *) ppi;
3018 3017
3019 suspend = 3018 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
3020 (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); 3019 MCTL_EN_MAC));
3021 if (!suspend) 3020 if (!suspend)
3022 wlapi_suspend_mac_and_wait(pi->sh->physhim); 3021 wlapi_suspend_mac_and_wait(pi->sh->physhim);
3023 3022
@@ -3535,15 +3534,17 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
3535 timer = 0; 3534 timer = 0;
3536 old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); 3535 old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
3537 3536
3538 curval1 = R_REG(&pi->regs->psm_corectlsts); 3537 curval1 = bcma_read16(pi->d11core, D11REGOFFS(psm_corectlsts));
3539 ptr[130] = 0; 3538 ptr[130] = 0;
3540 W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1)); 3539 bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts),
3540 ((1 << 6) | curval1));
3541 3541
3542 W_REG(&pi->regs->smpl_clct_strptr, 0x7E00); 3542 bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_strptr), 0x7E00);
3543 W_REG(&pi->regs->smpl_clct_stpptr, 0x8000); 3543 bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_stpptr), 0x8000);
3544 udelay(20); 3544 udelay(20);
3545 curval2 = R_REG(&pi->regs->psm_phy_hdr_param); 3545 curval2 = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
3546 W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30); 3546 bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
3547 curval2 | 0x30);
3547 3548
3548 write_phy_reg(pi, 0x555, 0x0); 3549 write_phy_reg(pi, 0x555, 0x0);
3549 write_phy_reg(pi, 0x5a6, 0x5); 3550 write_phy_reg(pi, 0x5a6, 0x5);
@@ -3560,19 +3561,19 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
3560 3561
3561 sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); 3562 sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
3562 write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008)); 3563 write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008));
3563 stpptr = R_REG(&pi->regs->smpl_clct_stpptr); 3564 stpptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_stpptr));
3564 curptr = R_REG(&pi->regs->smpl_clct_curptr); 3565 curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
3565 do { 3566 do {
3566 udelay(10); 3567 udelay(10);
3567 curptr = R_REG(&pi->regs->smpl_clct_curptr); 3568 curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
3568 timer++; 3569 timer++;
3569 } while ((curptr != stpptr) && (timer < 500)); 3570 } while ((curptr != stpptr) && (timer < 500));
3570 3571
3571 W_REG(&pi->regs->psm_phy_hdr_param, 0x2); 3572 bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), 0x2);
3572 strptr = 0x7E00; 3573 strptr = 0x7E00;
3573 W_REG(&pi->regs->tplatewrptr, strptr); 3574 bcma_write32(pi->d11core, D11REGOFFS(tplatewrptr), strptr);
3574 while (strptr < 0x8000) { 3575 while (strptr < 0x8000) {
3575 val = R_REG(&pi->regs->tplatewrdata); 3576 val = bcma_read32(pi->d11core, D11REGOFFS(tplatewrdata));
3576 imag = ((val >> 16) & 0x3ff); 3577 imag = ((val >> 16) & 0x3ff);
3577 real = ((val) & 0x3ff); 3578 real = ((val) & 0x3ff);
3578 if (imag > 511) 3579 if (imag > 511)
@@ -3597,8 +3598,8 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
3597 } 3598 }
3598 3599
3599 write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); 3600 write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
3600 W_REG(&pi->regs->psm_phy_hdr_param, curval2); 3601 bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), curval2);
3601 W_REG(&pi->regs->psm_corectlsts, curval1); 3602 bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), curval1);
3602} 3603}
3603 3604
3604static void 3605static void
@@ -3968,9 +3969,9 @@ s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode)
3968 bool suspend = 0; 3969 bool suspend = 0;
3969 3970
3970 if (mode == 1) { 3971 if (mode == 1) {
3971 suspend = 3972 suspend = (0 == (bcma_read32(pi->d11core,
3972 (0 == 3973 D11REGOFFS(maccontrol)) &
3973 (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); 3974 MCTL_EN_MAC));
3974 if (!suspend) 3975 if (!suspend)
3975 wlapi_suspend_mac_and_wait(pi->sh->physhim); 3976 wlapi_suspend_mac_and_wait(pi->sh->physhim);
3976 wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE); 3977 wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4012,9 +4013,9 @@ u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode)
4012 struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; 4013 struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
4013 4014
4014 if (mode == 1) { 4015 if (mode == 1) {
4015 suspend = 4016 suspend = (0 == (bcma_read32(pi->d11core,
4016 (0 == 4017 D11REGOFFS(maccontrol)) &
4017 (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); 4018 MCTL_EN_MAC));
4018 if (!suspend) 4019 if (!suspend)
4019 wlapi_suspend_mac_and_wait(pi->sh->physhim); 4020 wlapi_suspend_mac_and_wait(pi->sh->physhim);
4020 wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE); 4021 wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4078,9 +4079,9 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
4078 bool suspend = 0; 4079 bool suspend = 0;
4079 4080
4080 if (mode == 1) { 4081 if (mode == 1) {
4081 suspend = 4082 suspend = (0 == (bcma_read32(pi->d11core,
4082 (0 == 4083 D11REGOFFS(maccontrol)) &
4083 (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); 4084 MCTL_EN_MAC));
4084 if (!suspend) 4085 if (!suspend)
4085 wlapi_suspend_mac_and_wait(pi->sh->physhim); 4086 wlapi_suspend_mac_and_wait(pi->sh->physhim);
4086 wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE); 4087 wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE);
@@ -4127,8 +4128,8 @@ static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
4127 s8 index; 4128 s8 index;
4128 u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); 4129 u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
4129 struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; 4130 struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
4130 suspend = 4131 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
4131 (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); 4132 MCTL_EN_MAC));
4132 if (!suspend) 4133 if (!suspend)
4133 wlapi_suspend_mac_and_wait(pi->sh->physhim); 4134 wlapi_suspend_mac_and_wait(pi->sh->physhim);
4134 wlc_lcnphy_deaf_mode(pi, true); 4135 wlc_lcnphy_deaf_mode(pi, true);
@@ -4166,8 +4167,8 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
4166 pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec); 4167 pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
4167 index = pi_lcn->lcnphy_current_index; 4168 index = pi_lcn->lcnphy_current_index;
4168 4169
4169 suspend = 4170 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
4170 (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); 4171 MCTL_EN_MAC));
4171 if (!suspend) { 4172 if (!suspend) {
4172 wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000); 4173 wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
4173 wlapi_suspend_mac_and_wait(pi->sh->physhim); 4174 wlapi_suspend_mac_and_wait(pi->sh->physhim);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index ec9b56639d54..a16f1ab292fd 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -17802,7 +17802,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
17802 17802
17803 if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { 17803 if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
17804 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); 17804 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
17805 (void)R_REG(&pi->regs->maccontrol); 17805 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
17806 udelay(1); 17806 udelay(1);
17807 } 17807 }
17808 17808
@@ -17953,7 +17953,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
17953 17953
17954 if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { 17954 if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
17955 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); 17955 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
17956 (void)R_REG(&pi->regs->maccontrol); 17956 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
17957 udelay(1); 17957 udelay(1);
17958 } 17958 }
17959 17959
@@ -19447,8 +19447,6 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
19447 u8 tx_pwr_ctrl_state; 19447 u8 tx_pwr_ctrl_state;
19448 bool do_nphy_cal = false; 19448 bool do_nphy_cal = false;
19449 uint core; 19449 uint core;
19450 uint origidx, intr_val;
19451 struct d11regs __iomem *regs;
19452 u32 d11_clk_ctl_st; 19450 u32 d11_clk_ctl_st;
19453 bool do_rssi_cal = false; 19451 bool do_rssi_cal = false;
19454 19452
@@ -19462,25 +19460,21 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
19462 (pi->sh->chippkg == BCM4718_PKG_ID))) { 19460 (pi->sh->chippkg == BCM4718_PKG_ID))) {
19463 if ((pi->sh->boardflags & BFL_EXTLNA) && 19461 if ((pi->sh->boardflags & BFL_EXTLNA) &&
19464 (CHSPEC_IS2G(pi->radio_chanspec))) 19462 (CHSPEC_IS2G(pi->radio_chanspec)))
19465 ai_corereg(pi->sh->sih, SI_CC_IDX, 19463 ai_cc_reg(pi->sh->sih,
19466 offsetof(struct chipcregs, chipcontrol), 19464 offsetof(struct chipcregs, chipcontrol),
19467 0x40, 0x40); 19465 0x40, 0x40);
19468 } 19466 }
19469 19467
19470 if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) && 19468 if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
19471 CHSPEC_IS40(pi->radio_chanspec)) { 19469 CHSPEC_IS40(pi->radio_chanspec)) {
19472 19470
19473 regs = (struct d11regs __iomem *) 19471 d11_clk_ctl_st = bcma_read32(pi->d11core,
19474 ai_switch_core(pi->sh->sih, 19472 D11REGOFFS(clk_ctl_st));
19475 D11_CORE_ID, &origidx, 19473 bcma_mask32(pi->d11core, D11REGOFFS(clk_ctl_st),
19476 &intr_val); 19474 ~(CCS_FORCEHT | CCS_HTAREQ));
19477 d11_clk_ctl_st = R_REG(&regs->clk_ctl_st);
19478 AND_REG(&regs->clk_ctl_st,
19479 ~(CCS_FORCEHT | CCS_HTAREQ));
19480 19475
19481 W_REG(&regs->clk_ctl_st, d11_clk_ctl_st); 19476 bcma_write32(pi->d11core, D11REGOFFS(clk_ctl_st),
19482 19477 d11_clk_ctl_st);
19483 ai_restore_core(pi->sh->sih, origidx, intr_val);
19484 } 19478 }
19485 19479
19486 pi->use_int_tx_iqlo_cal_nphy = 19480 pi->use_int_tx_iqlo_cal_nphy =
@@ -19885,7 +19879,8 @@ void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask)
19885 if (!pi->sh->clk) 19879 if (!pi->sh->clk)
19886 return; 19880 return;
19887 19881
19888 suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); 19882 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
19883 MCTL_EN_MAC));
19889 if (!suspend) 19884 if (!suspend)
19890 wlapi_suspend_mac_and_wait(pi->sh->physhim); 19885 wlapi_suspend_mac_and_wait(pi->sh->physhim);
19891 19886
@@ -21263,28 +21258,28 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
21263 val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand; 21258 val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
21264 if (CHSPEC_IS5G(chanspec) && !val) { 21259 if (CHSPEC_IS5G(chanspec) && !val) {
21265 21260
21266 val = R_REG(&pi->regs->psm_phy_hdr_param); 21261 val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
21267 W_REG(&pi->regs->psm_phy_hdr_param, 21262 bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
21268 (val | MAC_PHY_FORCE_CLK)); 21263 (val | MAC_PHY_FORCE_CLK));
21269 21264
21270 or_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG), 21265 or_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
21271 (BBCFG_RESETCCA | BBCFG_RESETRX)); 21266 (BBCFG_RESETCCA | BBCFG_RESETRX));
21272 21267
21273 W_REG(&pi->regs->psm_phy_hdr_param, val); 21268 bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
21274 21269
21275 or_phy_reg(pi, 0x09, NPHY_BandControl_currentBand); 21270 or_phy_reg(pi, 0x09, NPHY_BandControl_currentBand);
21276 } else if (!CHSPEC_IS5G(chanspec) && val) { 21271 } else if (!CHSPEC_IS5G(chanspec) && val) {
21277 21272
21278 and_phy_reg(pi, 0x09, ~NPHY_BandControl_currentBand); 21273 and_phy_reg(pi, 0x09, ~NPHY_BandControl_currentBand);
21279 21274
21280 val = R_REG(&pi->regs->psm_phy_hdr_param); 21275 val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
21281 W_REG(&pi->regs->psm_phy_hdr_param, 21276 bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
21282 (val | MAC_PHY_FORCE_CLK)); 21277 (val | MAC_PHY_FORCE_CLK));
21283 21278
21284 and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG), 21279 and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
21285 (u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX))); 21280 (u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX)));
21286 21281
21287 W_REG(&pi->regs->psm_phy_hdr_param, val); 21282 bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
21288 } 21283 }
21289 21284
21290 write_phy_reg(pi, 0x1ce, ci->PHY_BW1a); 21285 write_phy_reg(pi, 0x1ce, ci->PHY_BW1a);
@@ -21342,24 +21337,23 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
21342 spuravoid = 1; 21337 spuravoid = 1;
21343 21338
21344 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false); 21339 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
21345 si_pmu_spuravoid(pi->sh->sih, spuravoid); 21340 si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid);
21346 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true); 21341 wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
21347 21342
21348 if ((pi->sh->chip == BCM43224_CHIP_ID) || 21343 if ((pi->sh->chip == BCM43224_CHIP_ID) ||
21349 (pi->sh->chip == BCM43225_CHIP_ID)) { 21344 (pi->sh->chip == BCM43225_CHIP_ID)) {
21350
21351 if (spuravoid == 1) { 21345 if (spuravoid == 1) {
21352 21346 bcma_write16(pi->d11core,
21353 W_REG(&pi->regs->tsf_clk_frac_l, 21347 D11REGOFFS(tsf_clk_frac_l),
21354 0x5341); 21348 0x5341);
21355 W_REG(&pi->regs->tsf_clk_frac_h, 21349 bcma_write16(pi->d11core,
21356 0x8); 21350 D11REGOFFS(tsf_clk_frac_h), 0x8);
21357 } else { 21351 } else {
21358 21352 bcma_write16(pi->d11core,
21359 W_REG(&pi->regs->tsf_clk_frac_l, 21353 D11REGOFFS(tsf_clk_frac_l),
21360 0x8889); 21354 0x8889);
21361 W_REG(&pi->regs->tsf_clk_frac_h, 21355 bcma_write16(pi->d11core,
21362 0x8); 21356 D11REGOFFS(tsf_clk_frac_h), 0x8);
21363 } 21357 }
21364 } 21358 }
21365 21359
@@ -21499,13 +21493,13 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
21499 21493
21500 ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY); 21494 ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY);
21501 21495
21502 mc = R_REG(&pi->regs->maccontrol); 21496 mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
21503 mc &= ~MCTL_GPOUT_SEL_MASK; 21497 mc &= ~MCTL_GPOUT_SEL_MASK;
21504 W_REG(&pi->regs->maccontrol, mc); 21498 bcma_write32(pi->d11core, D11REGOFFS(maccontrol), mc);
21505 21499
21506 OR_REG(&pi->regs->psm_gpio_oe, mask); 21500 bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask);
21507 21501
21508 AND_REG(&pi->regs->psm_gpio_out, ~mask); 21502 bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask);
21509 21503
21510 if (lut_init) { 21504 if (lut_init) {
21511 write_phy_reg(pi, 0xf8, 0x02d8); 21505 write_phy_reg(pi, 0xf8, 0x02d8);
@@ -21522,9 +21516,8 @@ u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val)
21522 bool suspended = false; 21516 bool suspended = false;
21523 21517
21524 if (D11REV_IS(pi->sh->corerev, 16)) { 21518 if (D11REV_IS(pi->sh->corerev, 16)) {
21525 suspended = 21519 suspended = (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
21526 (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) ? 21520 MCTL_EN_MAC) ? false : true;
21527 false : true;
21528 if (!suspended) 21521 if (!suspended)
21529 wlapi_suspend_mac_and_wait(pi->sh->physhim); 21522 wlapi_suspend_mac_and_wait(pi->sh->physhim);
21530 } 21523 }
@@ -25383,7 +25376,8 @@ static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal)
25383 if (pi->nphy_papd_skip == 1) 25376 if (pi->nphy_papd_skip == 1)
25384 return; 25377 return;
25385 25378
25386 phy_b3 = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)); 25379 phy_b3 = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
25380 MCTL_EN_MAC));
25387 if (!phy_b3) 25381 if (!phy_b3)
25388 wlapi_suspend_mac_and_wait(pi->sh->physhim); 25382 wlapi_suspend_mac_and_wait(pi->sh->physhim);
25389 25383
@@ -28357,7 +28351,7 @@ void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi)
28357 28351
28358 if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { 28352 if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
28359 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); 28353 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
28360 (void)R_REG(&pi->regs->maccontrol); 28354 (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
28361 udelay(1); 28355 udelay(1);
28362 } 28356 }
28363 28357
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
index 12ba575f5785..4931d29d077b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
@@ -115,10 +115,10 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
115 uint rsrcs; 115 uint rsrcs;
116 116
117 /* # resources */ 117 /* # resources */
118 rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT; 118 rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
119 119
120 /* determine min/max rsrc masks */ 120 /* determine min/max rsrc masks */
121 switch (sih->chip) { 121 switch (ai_get_chip_id(sih)) {
122 case BCM43224_CHIP_ID: 122 case BCM43224_CHIP_ID:
123 case BCM43225_CHIP_ID: 123 case BCM43225_CHIP_ID:
124 /* ??? */ 124 /* ??? */
@@ -139,75 +139,84 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
139 *pmax = max_mask; 139 *pmax = max_mask;
140} 140}
141 141
142static void 142void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid)
143si_pmu_spuravoid_pllupdate(struct si_pub *sih, struct chipcregs __iomem *cc,
144 u8 spuravoid)
145{ 143{
146 u32 tmp = 0; 144 u32 tmp = 0;
145 struct bcma_device *core;
147 146
148 switch (sih->chip) { 147 /* switch to chipc */
148 core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
149
150 switch (ai_get_chip_id(sih)) {
149 case BCM43224_CHIP_ID: 151 case BCM43224_CHIP_ID:
150 case BCM43225_CHIP_ID: 152 case BCM43225_CHIP_ID:
151 if (spuravoid == 1) { 153 if (spuravoid == 1) {
152 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0); 154 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
153 W_REG(&cc->pllcontrol_data, 0x11500010); 155 PMU1_PLL0_PLLCTL0);
154 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1); 156 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
155 W_REG(&cc->pllcontrol_data, 0x000C0C06); 157 0x11500010);
156 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2); 158 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
157 W_REG(&cc->pllcontrol_data, 0x0F600a08); 159 PMU1_PLL0_PLLCTL1);
158 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3); 160 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
159 W_REG(&cc->pllcontrol_data, 0x00000000); 161 0x000C0C06);
160 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4); 162 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
161 W_REG(&cc->pllcontrol_data, 0x2001E920); 163 PMU1_PLL0_PLLCTL2);
162 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5); 164 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
163 W_REG(&cc->pllcontrol_data, 0x88888815); 165 0x0F600a08);
166 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
167 PMU1_PLL0_PLLCTL3);
168 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
169 0x00000000);
170 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
171 PMU1_PLL0_PLLCTL4);
172 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
173 0x2001E920);
174 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
175 PMU1_PLL0_PLLCTL5);
176 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
177 0x88888815);
164 } else { 178 } else {
165 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0); 179 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
166 W_REG(&cc->pllcontrol_data, 0x11100010); 180 PMU1_PLL0_PLLCTL0);
167 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1); 181 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
168 W_REG(&cc->pllcontrol_data, 0x000c0c06); 182 0x11100010);
169 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2); 183 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
170 W_REG(&cc->pllcontrol_data, 0x03000a08); 184 PMU1_PLL0_PLLCTL1);
171 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3); 185 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
172 W_REG(&cc->pllcontrol_data, 0x00000000); 186 0x000c0c06);
173 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4); 187 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
174 W_REG(&cc->pllcontrol_data, 0x200005c0); 188 PMU1_PLL0_PLLCTL2);
175 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5); 189 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
176 W_REG(&cc->pllcontrol_data, 0x88888815); 190 0x03000a08);
191 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
192 PMU1_PLL0_PLLCTL3);
193 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
194 0x00000000);
195 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
196 PMU1_PLL0_PLLCTL4);
197 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
198 0x200005c0);
199 bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
200 PMU1_PLL0_PLLCTL5);
201 bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
202 0x88888815);
177 } 203 }
178 tmp = 1 << 10; 204 tmp = 1 << 10;
179 break; 205 break;
180 206
181 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
182 W_REG(&cc->pllcontrol_data, 0x11100008);
183 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
184 W_REG(&cc->pllcontrol_data, 0x0c000c06);
185 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
186 W_REG(&cc->pllcontrol_data, 0x03000a08);
187 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
188 W_REG(&cc->pllcontrol_data, 0x00000000);
189 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
190 W_REG(&cc->pllcontrol_data, 0x200005c0);
191 W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
192 W_REG(&cc->pllcontrol_data, 0x88888855);
193
194 tmp = 1 << 10;
195 break;
196
197 default: 207 default:
198 /* bail out */ 208 /* bail out */
199 return; 209 return;
200 } 210 }
201 211
202 tmp |= R_REG(&cc->pmucontrol); 212 bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp);
203 W_REG(&cc->pmucontrol, tmp);
204} 213}
205 214
206u16 si_pmu_fast_pwrup_delay(struct si_pub *sih) 215u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
207{ 216{
208 uint delay = PMU_MAX_TRANSITION_DLY; 217 uint delay = PMU_MAX_TRANSITION_DLY;
209 218
210 switch (sih->chip) { 219 switch (ai_get_chip_id(sih)) {
211 case BCM43224_CHIP_ID: 220 case BCM43224_CHIP_ID:
212 case BCM43225_CHIP_ID: 221 case BCM43225_CHIP_ID:
213 case BCM4313_CHIP_ID: 222 case BCM4313_CHIP_ID:
@@ -220,54 +229,35 @@ u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
220 return (u16) delay; 229 return (u16) delay;
221} 230}
222 231
223void si_pmu_sprom_enable(struct si_pub *sih, bool enable)
224{
225 struct chipcregs __iomem *cc;
226 uint origidx;
227
228 /* Remember original core before switch to chipc */
229 origidx = ai_coreidx(sih);
230 cc = ai_setcoreidx(sih, SI_CC_IDX);
231
232 /* Return to original core */
233 ai_setcoreidx(sih, origidx);
234}
235
236/* Read/write a chipcontrol reg */ 232/* Read/write a chipcontrol reg */
237u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val) 233u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
238{ 234{
239 ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol_addr), 235 ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_addr), ~0, reg);
240 ~0, reg); 236 return ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_data),
241 return ai_corereg(sih, SI_CC_IDX, 237 mask, val);
242 offsetof(struct chipcregs, chipcontrol_data), mask,
243 val);
244} 238}
245 239
246/* Read/write a regcontrol reg */ 240/* Read/write a regcontrol reg */
247u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val) 241u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
248{ 242{
249 ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, regcontrol_addr), 243 ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_addr), ~0, reg);
250 ~0, reg); 244 return ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_data),
251 return ai_corereg(sih, SI_CC_IDX, 245 mask, val);
252 offsetof(struct chipcregs, regcontrol_data), mask,
253 val);
254} 246}
255 247
256/* Read/write a pllcontrol reg */ 248/* Read/write a pllcontrol reg */
257u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val) 249u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
258{ 250{
259 ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pllcontrol_addr), 251 ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_addr), ~0, reg);
260 ~0, reg); 252 return ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_data),
261 return ai_corereg(sih, SI_CC_IDX, 253 mask, val);
262 offsetof(struct chipcregs, pllcontrol_data), mask,
263 val);
264} 254}
265 255
266/* PMU PLL update */ 256/* PMU PLL update */
267void si_pmu_pllupd(struct si_pub *sih) 257void si_pmu_pllupd(struct si_pub *sih)
268{ 258{
269 ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pmucontrol), 259 ai_cc_reg(sih, offsetof(struct chipcregs, pmucontrol),
270 PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD); 260 PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
271} 261}
272 262
273/* query alp/xtal clock frequency */ 263/* query alp/xtal clock frequency */
@@ -276,10 +266,10 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
276 u32 clock = ALP_CLOCK; 266 u32 clock = ALP_CLOCK;
277 267
278 /* bail out with default */ 268 /* bail out with default */
279 if (!(sih->cccaps & CC_CAP_PMU)) 269 if (!(ai_get_cccaps(sih) & CC_CAP_PMU))
280 return clock; 270 return clock;
281 271
282 switch (sih->chip) { 272 switch (ai_get_chip_id(sih)) {
283 case BCM43224_CHIP_ID: 273 case BCM43224_CHIP_ID:
284 case BCM43225_CHIP_ID: 274 case BCM43225_CHIP_ID:
285 case BCM4313_CHIP_ID: 275 case BCM4313_CHIP_ID:
@@ -293,95 +283,29 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
293 return clock; 283 return clock;
294} 284}
295 285
296void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid)
297{
298 struct chipcregs __iomem *cc;
299 uint origidx, intr_val;
300
301 /* Remember original core before switch to chipc */
302 cc = (struct chipcregs __iomem *)
303 ai_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
304
305 /* update the pll changes */
306 si_pmu_spuravoid_pllupdate(sih, cc, spuravoid);
307
308 /* Return to original core */
309 ai_restore_core(sih, origidx, intr_val);
310}
311
312/* initialize PMU */ 286/* initialize PMU */
313void si_pmu_init(struct si_pub *sih) 287void si_pmu_init(struct si_pub *sih)
314{ 288{
315 struct chipcregs __iomem *cc; 289 struct bcma_device *core;
316 uint origidx;
317 290
318 /* Remember original core before switch to chipc */ 291 /* select chipc */
319 origidx = ai_coreidx(sih); 292 core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
320 cc = ai_setcoreidx(sih, SI_CC_IDX);
321
322 if (sih->pmurev == 1)
323 AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT);
324 else if (sih->pmurev >= 2)
325 OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT);
326 293
327 /* Return to original core */ 294 if (ai_get_pmurev(sih) == 1)
328 ai_setcoreidx(sih, origidx); 295 bcma_mask32(core, CHIPCREGOFFS(pmucontrol),
329} 296 ~PCTL_NOILP_ON_WAIT);
330 297 else if (ai_get_pmurev(sih) >= 2)
331/* initialize PMU chip controls and other chip level stuff */ 298 bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);
332void si_pmu_chip_init(struct si_pub *sih)
333{
334 uint origidx;
335
336 /* Gate off SPROM clock and chip select signals */
337 si_pmu_sprom_enable(sih, false);
338
339 /* Remember original core */
340 origidx = ai_coreidx(sih);
341
342 /* Return to original core */
343 ai_setcoreidx(sih, origidx);
344}
345
346/* initialize PMU switch/regulators */
347void si_pmu_swreg_init(struct si_pub *sih)
348{
349}
350
351/* initialize PLL */
352void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq)
353{
354 struct chipcregs __iomem *cc;
355 uint origidx;
356
357 /* Remember original core before switch to chipc */
358 origidx = ai_coreidx(sih);
359 cc = ai_setcoreidx(sih, SI_CC_IDX);
360
361 switch (sih->chip) {
362 case BCM4313_CHIP_ID:
363 case BCM43224_CHIP_ID:
364 case BCM43225_CHIP_ID:
365 /* ??? */
366 break;
367 default:
368 break;
369 }
370
371 /* Return to original core */
372 ai_setcoreidx(sih, origidx);
373} 299}
374 300
375/* initialize PMU resources */ 301/* initialize PMU resources */
376void si_pmu_res_init(struct si_pub *sih) 302void si_pmu_res_init(struct si_pub *sih)
377{ 303{
378 struct chipcregs __iomem *cc; 304 struct bcma_device *core;
379 uint origidx;
380 u32 min_mask = 0, max_mask = 0; 305 u32 min_mask = 0, max_mask = 0;
381 306
382 /* Remember original core before switch to chipc */ 307 /* select to chipc */
383 origidx = ai_coreidx(sih); 308 core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
384 cc = ai_setcoreidx(sih, SI_CC_IDX);
385 309
386 /* Determine min/max rsrc masks */ 310 /* Determine min/max rsrc masks */
387 si_pmu_res_masks(sih, &min_mask, &max_mask); 311 si_pmu_res_masks(sih, &min_mask, &max_mask);
@@ -391,55 +315,50 @@ void si_pmu_res_init(struct si_pub *sih)
391 /* Program max resource mask */ 315 /* Program max resource mask */
392 316
393 if (max_mask) 317 if (max_mask)
394 W_REG(&cc->max_res_mask, max_mask); 318 bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask);
395 319
396 /* Program min resource mask */ 320 /* Program min resource mask */
397 321
398 if (min_mask) 322 if (min_mask)
399 W_REG(&cc->min_res_mask, min_mask); 323 bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask);
400 324
401 /* Add some delay; allow resources to come up and settle. */ 325 /* Add some delay; allow resources to come up and settle. */
402 mdelay(2); 326 mdelay(2);
403
404 /* Return to original core */
405 ai_setcoreidx(sih, origidx);
406} 327}
407 328
408u32 si_pmu_measure_alpclk(struct si_pub *sih) 329u32 si_pmu_measure_alpclk(struct si_pub *sih)
409{ 330{
410 struct chipcregs __iomem *cc; 331 struct bcma_device *core;
411 uint origidx;
412 u32 alp_khz; 332 u32 alp_khz;
413 333
414 if (sih->pmurev < 10) 334 if (ai_get_pmurev(sih) < 10)
415 return 0; 335 return 0;
416 336
417 /* Remember original core before switch to chipc */ 337 /* Remember original core before switch to chipc */
418 origidx = ai_coreidx(sih); 338 core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
419 cc = ai_setcoreidx(sih, SI_CC_IDX);
420 339
421 if (R_REG(&cc->pmustatus) & PST_EXTLPOAVAIL) { 340 if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) {
422 u32 ilp_ctr, alp_hz; 341 u32 ilp_ctr, alp_hz;
423 342
424 /* 343 /*
425 * Enable the reg to measure the freq, 344 * Enable the reg to measure the freq,
426 * in case it was disabled before 345 * in case it was disabled before
427 */ 346 */
428 W_REG(&cc->pmu_xtalfreq, 347 bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq),
429 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT); 348 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
430 349
431 /* Delay for well over 4 ILP clocks */ 350 /* Delay for well over 4 ILP clocks */
432 udelay(1000); 351 udelay(1000);
433 352
434 /* Read the latched number of ALP ticks per 4 ILP ticks */ 353 /* Read the latched number of ALP ticks per 4 ILP ticks */
435 ilp_ctr = 354 ilp_ctr = bcma_read32(core, CHIPCREGOFFS(pmu_xtalfreq)) &
436 R_REG(&cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK; 355 PMU_XTALFREQ_REG_ILPCTR_MASK;
437 356
438 /* 357 /*
439 * Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT 358 * Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT
440 * bit to save power 359 * bit to save power
441 */ 360 */
442 W_REG(&cc->pmu_xtalfreq, 0); 361 bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq), 0);
443 362
444 /* Calculate ALP frequency */ 363 /* Calculate ALP frequency */
445 alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4; 364 alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;
@@ -452,8 +371,5 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih)
452 } else 371 } else
453 alp_khz = 0; 372 alp_khz = 0;
454 373
455 /* Return to original core */
456 ai_setcoreidx(sih, origidx);
457
458 return alp_khz; 374 return alp_khz;
459} 375}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 3a08c620640e..3e39c5e0f9ff 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -26,13 +26,10 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
26extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val); 26extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
27extern u32 si_pmu_alp_clock(struct si_pub *sih); 27extern u32 si_pmu_alp_clock(struct si_pub *sih);
28extern void si_pmu_pllupd(struct si_pub *sih); 28extern void si_pmu_pllupd(struct si_pub *sih);
29extern void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid); 29extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid);
30extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val); 30extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
31extern void si_pmu_init(struct si_pub *sih); 31extern void si_pmu_init(struct si_pub *sih);
32extern void si_pmu_chip_init(struct si_pub *sih);
33extern void si_pmu_pll_init(struct si_pub *sih, u32 xtalfreq);
34extern void si_pmu_res_init(struct si_pub *sih); 32extern void si_pmu_res_init(struct si_pub *sih);
35extern void si_pmu_swreg_init(struct si_pub *sih);
36extern u32 si_pmu_measure_alpclk(struct si_pub *sih); 33extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
37 34
38#endif /* _BRCM_PMU_H_ */ 35#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 21ccf3a03987..f0038ad7d7bf 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -17,6 +17,7 @@
17#ifndef _BRCM_PUB_H_ 17#ifndef _BRCM_PUB_H_
18#define _BRCM_PUB_H_ 18#define _BRCM_PUB_H_
19 19
20#include <linux/bcma/bcma.h>
20#include <brcmu_wifi.h> 21#include <brcmu_wifi.h>
21#include "types.h" 22#include "types.h"
22#include "defs.h" 23#include "defs.h"
@@ -530,9 +531,8 @@ struct brcms_antselcfg {
530 531
531/* common functions for every port */ 532/* common functions for every port */
532extern struct brcms_c_info * 533extern struct brcms_c_info *
533brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit, 534brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
534 bool piomode, void __iomem *regsva, struct pci_dev *btparam, 535 bool piomode, uint *perr);
535 uint *perr);
536extern uint brcms_c_detach(struct brcms_c_info *wlc); 536extern uint brcms_c_detach(struct brcms_c_info *wlc);
537extern int brcms_c_up(struct brcms_c_info *wlc); 537extern int brcms_c_up(struct brcms_c_info *wlc);
538extern uint brcms_c_down(struct brcms_c_info *wlc); 538extern uint brcms_c_down(struct brcms_c_info *wlc);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
index b6987ea9fc68..61092156755e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
@@ -586,17 +586,6 @@ static const struct brcms_sromvar perpath_pci_sromvars[] = {
586 * shared between devices. */ 586 * shared between devices. */
587static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE]; 587static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE];
588 588
589static u8 __iomem *
590srom_window_address(struct si_pub *sih, u8 __iomem *curmap)
591{
592 if (sih->ccrev < 32)
593 return curmap + PCI_BAR0_SPROM_OFFSET;
594 if (sih->cccaps & CC_CAP_SROM)
595 return curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP;
596
597 return NULL;
598}
599
600static uint mask_shift(u16 mask) 589static uint mask_shift(u16 mask)
601{ 590{
602 uint i; 591 uint i;
@@ -779,17 +768,27 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
779 * Return 0 on success, nonzero on error. 768 * Return 0 on success, nonzero on error.
780 */ 769 */
781static int 770static int
782sprom_read_pci(struct si_pub *sih, u8 __iomem *sprom, uint wordoff, 771sprom_read_pci(struct si_pub *sih, u16 *buf, uint nwords, bool check_crc)
783 u16 *buf, uint nwords, bool check_crc)
784{ 772{
785 int err = 0; 773 int err = 0;
786 uint i; 774 uint i;
787 u8 *bbuf = (u8 *)buf; /* byte buffer */ 775 u8 *bbuf = (u8 *)buf; /* byte buffer */
788 uint nbytes = nwords << 1; 776 uint nbytes = nwords << 1;
777 struct bcma_device *core;
778 uint sprom_offset;
779
780 /* determine core to read */
781 if (ai_get_ccrev(sih) < 32) {
782 core = ai_findcore(sih, BCMA_CORE_80211, 0);
783 sprom_offset = PCI_BAR0_SPROM_OFFSET;
784 } else {
785 core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
786 sprom_offset = CHIPCREGOFFS(sromotp);
787 }
789 788
790 /* read the sprom in bytes */ 789 /* read the sprom in bytes */
791 for (i = 0; i < nbytes; i++) 790 for (i = 0; i < nbytes; i++)
792 bbuf[i] = readb(sprom+i); 791 bbuf[i] = bcma_read8(core, sprom_offset+i);
793 792
794 if (buf[0] == 0xffff) 793 if (buf[0] == 0xffff)
795 /* 794 /*
@@ -851,10 +850,9 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords)
851 * Initialize nonvolatile variable table from sprom. 850 * Initialize nonvolatile variable table from sprom.
852 * Return 0 on success, nonzero on error. 851 * Return 0 on success, nonzero on error.
853 */ 852 */
854static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap) 853int srom_var_init(struct si_pub *sih)
855{ 854{
856 u16 *srom; 855 u16 *srom;
857 u8 __iomem *sromwindow;
858 u8 sromrev = 0; 856 u8 sromrev = 0;
859 u32 sr; 857 u32 sr;
860 int err = 0; 858 int err = 0;
@@ -866,12 +864,9 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
866 if (!srom) 864 if (!srom)
867 return -ENOMEM; 865 return -ENOMEM;
868 866
869 sromwindow = srom_window_address(sih, curmap);
870
871 crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY); 867 crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
872 if (ai_is_sprom_available(sih)) { 868 if (ai_is_sprom_available(sih)) {
873 err = sprom_read_pci(sih, sromwindow, 0, srom, 869 err = sprom_read_pci(sih, srom, SROM4_WORDS, true);
874 SROM4_WORDS, true);
875 870
876 if (err == 0) 871 if (err == 0)
877 /* srom read and passed crc */ 872 /* srom read and passed crc */
@@ -921,21 +916,6 @@ void srom_free_vars(struct si_pub *sih)
921 kfree(entry); 916 kfree(entry);
922 } 917 }
923} 918}
924/*
925 * Initialize local vars from the right source for this platform.
926 * Return 0 on success, nonzero on error.
927 */
928int srom_var_init(struct si_pub *sih, void __iomem *curmap)
929{
930 uint len;
931
932 len = 0;
933
934 if (curmap != NULL)
935 return initvars_srom_pci(sih, curmap);
936
937 return -EINVAL;
938}
939 919
940/* 920/*
941 * Search the name=value vars for a specific one and return its value. 921 * Search the name=value vars for a specific one and return its value.
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.h b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
index c81df9798e50..f2a58f262c99 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
@@ -20,7 +20,7 @@
20#include "types.h" 20#include "types.h"
21 21
22/* Prototypes */ 22/* Prototypes */
23extern int srom_var_init(struct si_pub *sih, void __iomem *curmap); 23extern int srom_var_init(struct si_pub *sih);
24extern void srom_free_vars(struct si_pub *sih); 24extern void srom_free_vars(struct si_pub *sih);
25 25
26extern int srom_read(struct si_pub *sih, uint bus, void *curmap, 26extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/brcm80211/brcmsmac/types.h
index 27a814b07462..e11ae83111e4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/types.h
@@ -250,66 +250,18 @@ do { \
250 wiphy_err(dev, "%s: " fmt, __func__, ##args); \ 250 wiphy_err(dev, "%s: " fmt, __func__, ##args); \
251} while (0) 251} while (0)
252 252
253/*
254 * Register access macros.
255 *
256 * These macro's take a pointer to the address to read as one of their
257 * arguments. The macro itself deduces the size of the IO transaction (u8, u16
258 * or u32). Advantage of this approach in combination with using a struct to
259 * define the registers in a register block, is that access size and access
260 * location are defined in only one spot. This reduces the risk of the
261 * programmer trying to use an unsupported transaction size on a register.
262 *
263 */
264
265#define R_REG(r) \
266 ({ \
267 __typeof(*(r)) __osl_v; \
268 switch (sizeof(*(r))) { \
269 case sizeof(u8): \
270 __osl_v = readb((u8 __iomem *)(r)); \
271 break; \
272 case sizeof(u16): \
273 __osl_v = readw((u16 __iomem *)(r)); \
274 break; \
275 case sizeof(u32): \
276 __osl_v = readl((u32 __iomem *)(r)); \
277 break; \
278 } \
279 __osl_v; \
280 })
281
282#define W_REG(r, v) do { \
283 switch (sizeof(*(r))) { \
284 case sizeof(u8): \
285 writeb((u8)((v) & 0xFF), (u8 __iomem *)(r)); \
286 break; \
287 case sizeof(u16): \
288 writew((u16)((v) & 0xFFFF), (u16 __iomem *)(r)); \
289 break; \
290 case sizeof(u32): \
291 writel((u32)(v), (u32 __iomem *)(r)); \
292 break; \
293 } \
294 } while (0)
295
296#ifdef CONFIG_BCM47XX 253#ifdef CONFIG_BCM47XX
297/* 254/*
298 * bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder 255 * bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder
299 * transactions. As a fix, a read after write is performed on certain places 256 * transactions. As a fix, a read after write is performed on certain places
300 * in the code. Older chips and the newer 5357 family don't require this fix. 257 * in the code. Older chips and the newer 5357 family don't require this fix.
301 */ 258 */
302#define W_REG_FLUSH(r, v) ({ W_REG((r), (v)); (void)R_REG(r); }) 259#define bcma_wflush16(c, o, v) \
260 ({ bcma_write16(c, o, v); (void)bcma_read16(c, o); })
303#else 261#else
304#define W_REG_FLUSH(r, v) W_REG((r), (v)) 262#define bcma_wflush16(c, o, v) bcma_write16(c, o, v)
305#endif /* CONFIG_BCM47XX */ 263#endif /* CONFIG_BCM47XX */
306 264
307#define AND_REG(r, v) W_REG((r), R_REG(r) & (v))
308#define OR_REG(r, v) W_REG((r), R_REG(r) | (v))
309
310#define SET_REG(r, mask, val) \
311 W_REG((r), ((R_REG(r) & ~(mask)) | (val)))
312
313/* multi-bool data type: set of bools, mbool is true if any is set */ 265/* multi-bool data type: set of bools, mbool is true if any is set */
314 266
315/* set one bool */ 267/* set one bool */
diff --git a/drivers/net/wireless/brcm80211/include/chipcommon.h b/drivers/net/wireless/brcm80211/include/chipcommon.h
index fefabc39e646..f96834a7c055 100644
--- a/drivers/net/wireless/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/brcm80211/include/chipcommon.h
@@ -19,6 +19,8 @@
19 19
20#include "defs.h" /* for PAD macro */ 20#include "defs.h" /* for PAD macro */
21 21
22#define CHIPCREGOFFS(field) offsetof(struct chipcregs, field)
23
22struct chipcregs { 24struct chipcregs {
23 u32 chipid; /* 0x0 */ 25 u32 chipid; /* 0x0 */
24 u32 capabilities; 26 u32 capabilities;
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index e0b3e8d406b3..df7050abe717 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -1,5 +1,6 @@
1#include <linux/etherdevice.h> 1#include <linux/etherdevice.h>
2#include <linux/slab.h> 2#include <linux/slab.h>
3#include <linux/export.h>
3#include <net/lib80211.h> 4#include <net/lib80211.h>
4#include <linux/if_arp.h> 5#include <linux/if_arp.h>
5 6
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index c34a3b7f1292..344a981a052e 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,4 +1,5 @@
1#include <linux/slab.h> 1#include <linux/slab.h>
2#include <linux/export.h>
2 3
3#include "hostap_80211.h" 4#include "hostap_80211.h"
4#include "hostap_common.h" 5#include "hostap_common.h"
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 3d05dc15c6b8..e1f410277242 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -21,6 +21,8 @@
21#include <linux/random.h> 21#include <linux/random.h>
22#include <linux/if_arp.h> 22#include <linux/if_arp.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/export.h>
25#include <linux/moduleparam.h>
24 26
25#include "hostap_wlan.h" 27#include "hostap_wlan.h"
26#include "hostap.h" 28#include "hostap.h"
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 5441ad195119..89e9d3a78c3c 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -656,6 +656,9 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
656 "Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02", 656 "Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
657 0xe6ec52ce, 0x08649af2, 0x4b74baa0), 657 0xe6ec52ce, 0x08649af2, 0x4b74baa0),
658 PCMCIA_DEVICE_PROD_ID123( 658 PCMCIA_DEVICE_PROD_ID123(
659 "Canon", "Wireless LAN CF Card K30225", "Version 01.00",
660 0x96ef6fe2, 0x263fcbab, 0xa57adb8c),
661 PCMCIA_DEVICE_PROD_ID123(
659 "D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02", 662 "D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
660 0x71b18589, 0xb6f1b0ab, 0x4b74baa0), 663 0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
661 PCMCIA_DEVICE_PROD_ID123( 664 PCMCIA_DEVICE_PROD_ID123(
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index d737091cf6ac..47932b28aac1 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -3,6 +3,7 @@
3#include <linux/if_arp.h> 3#include <linux/if_arp.h>
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/slab.h> 5#include <linux/slab.h>
6#include <linux/export.h>
6#include "hostap_wlan.h" 7#include "hostap_wlan.h"
7#include "hostap.h" 8#include "hostap.h"
8#include "hostap_ap.h" 9#include "hostap_ap.h"
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 46a8c291c08a..18054d9c6688 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -5,6 +5,7 @@
5#include <linux/sched.h> 5#include <linux/sched.h>
6#include <linux/ethtool.h> 6#include <linux/ethtool.h>
7#include <linux/if_arp.h> 7#include <linux/if_arp.h>
8#include <linux/module.h>
8#include <net/lib80211.h> 9#include <net/lib80211.h>
9 10
10#include "hostap_wlan.h" 11#include "hostap_wlan.h"
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index 005ff25a405f..75ef8f04aabe 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -2,6 +2,7 @@
2 2
3#include <linux/types.h> 3#include <linux/types.h>
4#include <linux/proc_fs.h> 4#include <linux/proc_fs.h>
5#include <linux/export.h>
5#include <net/lib80211.h> 6#include <net/lib80211.h>
6 7
7#include "hostap_wlan.h" 8#include "hostap_wlan.h"
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
new file mode 100644
index 000000000000..5e1a19fd354d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -0,0 +1,505 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "common.h"
30#include "3945.h"
31
32static int
33il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
34{
35 int p = 0;
36
37 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
38 le32_to_cpu(il->_3945.stats.flag));
39 if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
40 p += scnprintf(buf + p, bufsz - p,
41 "\tStatistics have been cleared\n");
42 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
43 (le32_to_cpu(il->_3945.stats.flag) &
44 UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
45 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
46 (le32_to_cpu(il->_3945.stats.flag) &
47 UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
48 return p;
49}
50
51ssize_t
52il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
53 size_t count, loff_t *ppos)
54{
55 struct il_priv *il = file->private_data;
56 int pos = 0;
57 char *buf;
58 int bufsz =
59 sizeof(struct iwl39_stats_rx_phy) * 40 +
60 sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
61 ssize_t ret;
62 struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
63 struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
64 struct iwl39_stats_rx_non_phy *general, *accum_general;
65 struct iwl39_stats_rx_non_phy *delta_general, *max_general;
66
67 if (!il_is_alive(il))
68 return -EAGAIN;
69
70 buf = kzalloc(bufsz, GFP_KERNEL);
71 if (!buf) {
72 IL_ERR("Can not allocate Buffer\n");
73 return -ENOMEM;
74 }
75
76 /*
77 * The statistic information display here is based on
78 * the last stats notification from uCode
79 * might not reflect the current uCode activity
80 */
81 ofdm = &il->_3945.stats.rx.ofdm;
82 cck = &il->_3945.stats.rx.cck;
83 general = &il->_3945.stats.rx.general;
84 accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
85 accum_cck = &il->_3945.accum_stats.rx.cck;
86 accum_general = &il->_3945.accum_stats.rx.general;
87 delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
88 delta_cck = &il->_3945.delta_stats.rx.cck;
89 delta_general = &il->_3945.delta_stats.rx.general;
90 max_ofdm = &il->_3945.max_delta.rx.ofdm;
91 max_cck = &il->_3945.max_delta.rx.cck;
92 max_general = &il->_3945.max_delta.rx.general;
93
94 pos += il3945_stats_flag(il, buf, bufsz);
95 pos +=
96 scnprintf(buf + pos, bufsz - pos,
97 "%-32s current"
98 "acumulative delta max\n",
99 "Statistics_Rx - OFDM:");
100 pos +=
101 scnprintf(buf + pos, bufsz - pos,
102 " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
103 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
104 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
105 pos +=
106 scnprintf(buf + pos, bufsz - pos,
107 " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
108 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
109 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
110 pos +=
111 scnprintf(buf + pos, bufsz - pos,
112 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
113 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
114 delta_ofdm->plcp_err, max_ofdm->plcp_err);
115 pos +=
116 scnprintf(buf + pos, bufsz - pos,
117 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
118 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
119 delta_ofdm->crc32_err, max_ofdm->crc32_err);
120 pos +=
121 scnprintf(buf + pos, bufsz - pos,
122 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
123 le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
124 delta_ofdm->overrun_err, max_ofdm->overrun_err);
125 pos +=
126 scnprintf(buf + pos, bufsz - pos,
127 " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
128 le32_to_cpu(ofdm->early_overrun_err),
129 accum_ofdm->early_overrun_err,
130 delta_ofdm->early_overrun_err,
131 max_ofdm->early_overrun_err);
132 pos +=
133 scnprintf(buf + pos, bufsz - pos,
134 " %-30s %10u %10u %10u %10u\n", "crc32_good:",
135 le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
136 delta_ofdm->crc32_good, max_ofdm->crc32_good);
137 pos +=
138 scnprintf(buf + pos, bufsz - pos,
139 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
140 le32_to_cpu(ofdm->false_alarm_cnt),
141 accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
142 max_ofdm->false_alarm_cnt);
143 pos +=
144 scnprintf(buf + pos, bufsz - pos,
145 " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
146 le32_to_cpu(ofdm->fina_sync_err_cnt),
147 accum_ofdm->fina_sync_err_cnt,
148 delta_ofdm->fina_sync_err_cnt,
149 max_ofdm->fina_sync_err_cnt);
150 pos +=
151 scnprintf(buf + pos, bufsz - pos,
152 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
153 le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
154 delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
155 pos +=
156 scnprintf(buf + pos, bufsz - pos,
157 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
159 delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
160 pos +=
161 scnprintf(buf + pos, bufsz - pos,
162 " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
163 le32_to_cpu(ofdm->unresponded_rts),
164 accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
165 max_ofdm->unresponded_rts);
166 pos +=
167 scnprintf(buf + pos, bufsz - pos,
168 " %-30s %10u %10u %10u %10u\n",
169 "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos +=
175 scnprintf(buf + pos, bufsz - pos,
176 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
177 le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
178 delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
179 pos +=
180 scnprintf(buf + pos, bufsz - pos,
181 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
182 le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
183 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
184
185 pos +=
186 scnprintf(buf + pos, bufsz - pos,
187 "%-32s current"
188 "acumulative delta max\n",
189 "Statistics_Rx - CCK:");
190 pos +=
191 scnprintf(buf + pos, bufsz - pos,
192 " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
193 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
194 delta_cck->ina_cnt, max_cck->ina_cnt);
195 pos +=
196 scnprintf(buf + pos, bufsz - pos,
197 " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
198 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
199 delta_cck->fina_cnt, max_cck->fina_cnt);
200 pos +=
201 scnprintf(buf + pos, bufsz - pos,
202 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
203 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
204 delta_cck->plcp_err, max_cck->plcp_err);
205 pos +=
206 scnprintf(buf + pos, bufsz - pos,
207 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
208 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
209 delta_cck->crc32_err, max_cck->crc32_err);
210 pos +=
211 scnprintf(buf + pos, bufsz - pos,
212 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
213 le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
214 delta_cck->overrun_err, max_cck->overrun_err);
215 pos +=
216 scnprintf(buf + pos, bufsz - pos,
217 " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
218 le32_to_cpu(cck->early_overrun_err),
219 accum_cck->early_overrun_err,
220 delta_cck->early_overrun_err, max_cck->early_overrun_err);
221 pos +=
222 scnprintf(buf + pos, bufsz - pos,
223 " %-30s %10u %10u %10u %10u\n", "crc32_good:",
224 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
225 delta_cck->crc32_good, max_cck->crc32_good);
226 pos +=
227 scnprintf(buf + pos, bufsz - pos,
228 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
229 le32_to_cpu(cck->false_alarm_cnt),
230 accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
231 max_cck->false_alarm_cnt);
232 pos +=
233 scnprintf(buf + pos, bufsz - pos,
234 " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
235 le32_to_cpu(cck->fina_sync_err_cnt),
236 accum_cck->fina_sync_err_cnt,
237 delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
238 pos +=
239 scnprintf(buf + pos, bufsz - pos,
240 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
241 le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
242 delta_cck->sfd_timeout, max_cck->sfd_timeout);
243 pos +=
244 scnprintf(buf + pos, bufsz - pos,
245 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
246 le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
247 delta_cck->fina_timeout, max_cck->fina_timeout);
248 pos +=
249 scnprintf(buf + pos, bufsz - pos,
250 " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
251 le32_to_cpu(cck->unresponded_rts),
252 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
253 max_cck->unresponded_rts);
254 pos +=
255 scnprintf(buf + pos, bufsz - pos,
256 " %-30s %10u %10u %10u %10u\n",
257 "rxe_frame_lmt_ovrun:",
258 le32_to_cpu(cck->rxe_frame_limit_overrun),
259 accum_cck->rxe_frame_limit_overrun,
260 delta_cck->rxe_frame_limit_overrun,
261 max_cck->rxe_frame_limit_overrun);
262 pos +=
263 scnprintf(buf + pos, bufsz - pos,
264 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
265 le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
266 delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
267 pos +=
268 scnprintf(buf + pos, bufsz - pos,
269 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
270 le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
271 delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
272
273 pos +=
274 scnprintf(buf + pos, bufsz - pos,
275 "%-32s current"
276 "acumulative delta max\n",
277 "Statistics_Rx - GENERAL:");
278 pos +=
279 scnprintf(buf + pos, bufsz - pos,
280 " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
281 le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
282 delta_general->bogus_cts, max_general->bogus_cts);
283 pos +=
284 scnprintf(buf + pos, bufsz - pos,
285 " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
286 le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
287 delta_general->bogus_ack, max_general->bogus_ack);
288 pos +=
289 scnprintf(buf + pos, bufsz - pos,
290 " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:",
291 le32_to_cpu(general->non_bssid_frames),
292 accum_general->non_bssid_frames,
293 delta_general->non_bssid_frames,
294 max_general->non_bssid_frames);
295 pos +=
296 scnprintf(buf + pos, bufsz - pos,
297 " %-30s %10u %10u %10u %10u\n", "filtered_frames:",
298 le32_to_cpu(general->filtered_frames),
299 accum_general->filtered_frames,
300 delta_general->filtered_frames,
301 max_general->filtered_frames);
302 pos +=
303 scnprintf(buf + pos, bufsz - pos,
304 " %-30s %10u %10u %10u %10u\n",
305 "non_channel_beacons:",
306 le32_to_cpu(general->non_channel_beacons),
307 accum_general->non_channel_beacons,
308 delta_general->non_channel_beacons,
309 max_general->non_channel_beacons);
310
311 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
312 kfree(buf);
313 return ret;
314}
315
316ssize_t
317il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
318 size_t count, loff_t *ppos)
319{
320 struct il_priv *il = file->private_data;
321 int pos = 0;
322 char *buf;
323 int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
324 ssize_t ret;
325 struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
326
327 if (!il_is_alive(il))
328 return -EAGAIN;
329
330 buf = kzalloc(bufsz, GFP_KERNEL);
331 if (!buf) {
332 IL_ERR("Can not allocate Buffer\n");
333 return -ENOMEM;
334 }
335
336 /*
337 * The statistic information display here is based on
338 * the last stats notification from uCode
339 * might not reflect the current uCode activity
340 */
341 tx = &il->_3945.stats.tx;
342 accum_tx = &il->_3945.accum_stats.tx;
343 delta_tx = &il->_3945.delta_stats.tx;
344 max_tx = &il->_3945.max_delta.tx;
345 pos += il3945_stats_flag(il, buf, bufsz);
346 pos +=
347 scnprintf(buf + pos, bufsz - pos,
348 "%-32s current"
349 "acumulative delta max\n",
350 "Statistics_Tx:");
351 pos +=
352 scnprintf(buf + pos, bufsz - pos,
353 " %-30s %10u %10u %10u %10u\n", "preamble:",
354 le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
355 delta_tx->preamble_cnt, max_tx->preamble_cnt);
356 pos +=
357 scnprintf(buf + pos, bufsz - pos,
358 " %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:",
359 le32_to_cpu(tx->rx_detected_cnt),
360 accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
361 max_tx->rx_detected_cnt);
362 pos +=
363 scnprintf(buf + pos, bufsz - pos,
364 " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:",
365 le32_to_cpu(tx->bt_prio_defer_cnt),
366 accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
367 max_tx->bt_prio_defer_cnt);
368 pos +=
369 scnprintf(buf + pos, bufsz - pos,
370 " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:",
371 le32_to_cpu(tx->bt_prio_kill_cnt),
372 accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
373 max_tx->bt_prio_kill_cnt);
374 pos +=
375 scnprintf(buf + pos, bufsz - pos,
376 " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:",
377 le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
378 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
379 pos +=
380 scnprintf(buf + pos, bufsz - pos,
381 " %-30s %10u %10u %10u %10u\n", "cts_timeout:",
382 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
383 delta_tx->cts_timeout, max_tx->cts_timeout);
384 pos +=
385 scnprintf(buf + pos, bufsz - pos,
386 " %-30s %10u %10u %10u %10u\n", "ack_timeout:",
387 le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
388 delta_tx->ack_timeout, max_tx->ack_timeout);
389 pos +=
390 scnprintf(buf + pos, bufsz - pos,
391 " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:",
392 le32_to_cpu(tx->expected_ack_cnt),
393 accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
394 max_tx->expected_ack_cnt);
395 pos +=
396 scnprintf(buf + pos, bufsz - pos,
397 " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:",
398 le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
399 delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
400
401 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
402 kfree(buf);
403 return ret;
404}
405
406ssize_t
407il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
408 size_t count, loff_t *ppos)
409{
410 struct il_priv *il = file->private_data;
411 int pos = 0;
412 char *buf;
413 int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
414 ssize_t ret;
415 struct iwl39_stats_general *general, *accum_general;
416 struct iwl39_stats_general *delta_general, *max_general;
417 struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
418 struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
419
420 if (!il_is_alive(il))
421 return -EAGAIN;
422
423 buf = kzalloc(bufsz, GFP_KERNEL);
424 if (!buf) {
425 IL_ERR("Can not allocate Buffer\n");
426 return -ENOMEM;
427 }
428
429 /*
430 * The statistic information display here is based on
431 * the last stats notification from uCode
432 * might not reflect the current uCode activity
433 */
434 general = &il->_3945.stats.general;
435 dbg = &il->_3945.stats.general.dbg;
436 div = &il->_3945.stats.general.div;
437 accum_general = &il->_3945.accum_stats.general;
438 delta_general = &il->_3945.delta_stats.general;
439 max_general = &il->_3945.max_delta.general;
440 accum_dbg = &il->_3945.accum_stats.general.dbg;
441 delta_dbg = &il->_3945.delta_stats.general.dbg;
442 max_dbg = &il->_3945.max_delta.general.dbg;
443 accum_div = &il->_3945.accum_stats.general.div;
444 delta_div = &il->_3945.delta_stats.general.div;
445 max_div = &il->_3945.max_delta.general.div;
446 pos += il3945_stats_flag(il, buf, bufsz);
447 pos +=
448 scnprintf(buf + pos, bufsz - pos,
449 "%-32s current"
450 "acumulative delta max\n",
451 "Statistics_General:");
452 pos +=
453 scnprintf(buf + pos, bufsz - pos,
454 " %-30s %10u %10u %10u %10u\n", "burst_check:",
455 le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
456 delta_dbg->burst_check, max_dbg->burst_check);
457 pos +=
458 scnprintf(buf + pos, bufsz - pos,
459 " %-30s %10u %10u %10u %10u\n", "burst_count:",
460 le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
461 delta_dbg->burst_count, max_dbg->burst_count);
462 pos +=
463 scnprintf(buf + pos, bufsz - pos,
464 " %-30s %10u %10u %10u %10u\n", "sleep_time:",
465 le32_to_cpu(general->sleep_time),
466 accum_general->sleep_time, delta_general->sleep_time,
467 max_general->sleep_time);
468 pos +=
469 scnprintf(buf + pos, bufsz - pos,
470 " %-30s %10u %10u %10u %10u\n", "slots_out:",
471 le32_to_cpu(general->slots_out), accum_general->slots_out,
472 delta_general->slots_out, max_general->slots_out);
473 pos +=
474 scnprintf(buf + pos, bufsz - pos,
475 " %-30s %10u %10u %10u %10u\n", "slots_idle:",
476 le32_to_cpu(general->slots_idle),
477 accum_general->slots_idle, delta_general->slots_idle,
478 max_general->slots_idle);
479 pos +=
480 scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
481 le32_to_cpu(general->ttl_timestamp));
482 pos +=
483 scnprintf(buf + pos, bufsz - pos,
484 " %-30s %10u %10u %10u %10u\n", "tx_on_a:",
485 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
486 delta_div->tx_on_a, max_div->tx_on_a);
487 pos +=
488 scnprintf(buf + pos, bufsz - pos,
489 " %-30s %10u %10u %10u %10u\n", "tx_on_b:",
490 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
491 delta_div->tx_on_b, max_div->tx_on_b);
492 pos +=
493 scnprintf(buf + pos, bufsz - pos,
494 " %-30s %10u %10u %10u %10u\n", "exec_time:",
495 le32_to_cpu(div->exec_time), accum_div->exec_time,
496 delta_div->exec_time, max_div->exec_time);
497 pos +=
498 scnprintf(buf + pos, bufsz - pos,
499 " %-30s %10u %10u %10u %10u\n", "probe_time:",
500 le32_to_cpu(div->probe_time), accum_div->probe_time,
501 delta_div->probe_time, max_div->probe_time);
502 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
503 kfree(buf);
504 return ret;
505}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
new file mode 100644
index 000000000000..daef6b58f6cc
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -0,0 +1,3977 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/ieee80211_radiotap.h>
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl3945"
53
54#include "commands.h"
55#include "common.h"
56#include "3945.h"
57#include "iwl-spectrum.h"
58
59/*
60 * module name, copyright, version, etc.
61 */
62
63#define DRV_DESCRIPTION \
64"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
65
66#ifdef CONFIG_IWLEGACY_DEBUG
67#define VD "d"
68#else
69#define VD
70#endif
71
72/*
73 * add "s" to indicate spectrum measurement included.
74 * we add it here to be consistent with previous releases in which
75 * this was configurable.
76 */
77#define DRV_VERSION IWLWIFI_VERSION VD "s"
78#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
79#define DRV_AUTHOR "<ilw@linux.intel.com>"
80
81MODULE_DESCRIPTION(DRV_DESCRIPTION);
82MODULE_VERSION(DRV_VERSION);
83MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
84MODULE_LICENSE("GPL");
85
 /* module parameters (defaults; members not listed here are zero) */
struct il_mod_params il3945_mod_params = {
	.sw_crypto = 1,
	.restart_fw = 1,
	.disable_hw_scan = 1,
	/* the rest are 0 by default */
};
93
94/**
95 * il3945_get_antenna_flags - Get antenna flags for RXON command
96 * @il: eeprom and antenna fields are used to determine antenna flags
97 *
98 * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed
99 * il3945_mod_params.antenna specifies the antenna diversity mode:
100 *
101 * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
102 * IL_ANTENNA_MAIN - Force MAIN antenna
103 * IL_ANTENNA_AUX - Force AUX antenna
104 */
105__le32
106il3945_get_antenna_flags(const struct il_priv *il)
107{
108 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
109
110 switch (il3945_mod_params.antenna) {
111 case IL_ANTENNA_DIVERSITY:
112 return 0;
113
114 case IL_ANTENNA_MAIN:
115 if (eeprom->antenna_switch_type)
116 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
117 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
118
119 case IL_ANTENNA_AUX:
120 if (eeprom->antenna_switch_type)
121 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
122 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
123 }
124
125 /* bad antenna selector value */
126 IL_ERR("Bad antenna selector value (0x%x)\n",
127 il3945_mod_params.antenna);
128
129 return 0; /* "diversity" is default if error */
130}
131
/*
 * il3945_set_ccmp_dynamic_key_info - program a CCMP (AES) pairwise key
 *
 * Builds the uCode key flags, caches the key material in the driver's
 * station table entry under sta_lock, and pushes the updated station
 * entry to the uCode with an asynchronous ADD_STA command.
 * Returns the result of il_send_add_sta().
 */
static int
il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	int ret;

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);

	/* Keys for the broadcast station are marked multicast (group keys). */
	if (sta_id == il->ctx.bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* Ask mac80211 to generate the IV for us. */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = keyconf->keyidx;
	key_flags &= ~STA_KEY_FLG_INVALID;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	/* Only allocate a uCode key slot when none is assigned yet. */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	D_INFO("hwcrypto: modify ucode station key info\n");

	ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
179
/* Hardware TKIP keys are not supported by this driver; always reports
 * -EOPNOTSUPP so the caller can fall back. */
static int
il3945_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	return -EOPNOTSUPP;
}
186
/* Hardware dynamic WEP keys are not supported by this driver; always
 * reports -EOPNOTSUPP so the caller can fall back. */
static int
il3945_set_wep_dynamic_key_info(struct il_priv *il,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	return -EOPNOTSUPP;
}
193
/*
 * il3945_clear_sta_key_info - remove key info for one station
 *
 * Clears both the driver's cached key material and the station's uCode
 * key fields under sta_lock, then sends a synchronous ADD_STA command
 * from an on-stack copy (so the spinlock is not held across the
 * command) to tell the uCode the key is gone.
 */
static int
il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
{
	unsigned long flags;
	struct il_addsta_cmd sta_cmd;

	spin_lock_irqsave(&il->sta_lock, flags);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	D_INFO("hwcrypto: clear ucode station key info\n");
	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
213
214static int
215il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
216 u8 sta_id)
217{
218 int ret = 0;
219
220 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
221
222 switch (keyconf->cipher) {
223 case WLAN_CIPHER_SUITE_CCMP:
224 ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
225 break;
226 case WLAN_CIPHER_SUITE_TKIP:
227 ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
228 break;
229 case WLAN_CIPHER_SUITE_WEP40:
230 case WLAN_CIPHER_SUITE_WEP104:
231 ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
232 break;
233 default:
234 IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
235 ret = -EINVAL;
236 }
237
238 D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
239 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
240
241 return ret;
242}
243
/*
 * il3945_remove_static_key - remove a static (default) key
 *
 * Static key removal is not supported by this driver; always reports
 * -EOPNOTSUPP to the caller.  (The previous body round-tripped the
 * constant through a pointless local variable.)
 */
static int
il3945_remove_static_key(struct il_priv *il)
{
	return -EOPNOTSUPP;
}
251
252static int
253il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
254{
255 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
256 key->cipher == WLAN_CIPHER_SUITE_WEP104)
257 return -EOPNOTSUPP;
258
259 IL_ERR("Static key invalid: cipher %x\n", key->cipher);
260 return -EINVAL;
261}
262
/*
 * il3945_clear_free_frames - release the pre-allocated frame pool
 *
 * kfree()s every frame on il->free_frames, decrementing the usage
 * counter for each.  If the counter is still non-zero afterwards, some
 * frames were never returned to the pool; warn and reset it to zero.
 */
static void
il3945_clear_free_frames(struct il_priv *il)
{
	struct list_head *element;

	D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);

	while (!list_empty(&il->free_frames)) {
		element = il->free_frames.next;
		list_del(element);
		kfree(list_entry(element, struct il3945_frame, list));
		il->frames_count--;
	}

	if (il->frames_count) {
		IL_WARN("%d frames still in use. Did we lose one?\n",
			il->frames_count);
		il->frames_count = 0;
	}
}
283
284static struct il3945_frame *
285il3945_get_free_frame(struct il_priv *il)
286{
287 struct il3945_frame *frame;
288 struct list_head *element;
289 if (list_empty(&il->free_frames)) {
290 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
291 if (!frame) {
292 IL_ERR("Could not allocate frame!\n");
293 return NULL;
294 }
295
296 il->frames_count++;
297 return frame;
298 }
299
300 element = il->free_frames.next;
301 list_del(element);
302 return list_entry(element, struct il3945_frame, list);
303}
304
/*
 * il3945_free_frame - return a frame to the pre-allocated pool
 *
 * The frame is zeroed before being linked back onto il->free_frames so
 * stale contents never leak into the next user.
 */
static void
il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &il->free_frames);
}
311
312unsigned int
313il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
314 int left)
315{
316
317 if (!il_is_associated(il) || !il->beacon_skb)
318 return 0;
319
320 if (il->beacon_skb->len > left)
321 return 0;
322
323 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
324
325 return il->beacon_skb->len;
326}
327
/*
 * il3945_send_beacon_cmd - build and send a C_TX_BEACON command
 *
 * Grabs a frame buffer from the free pool, has the HW layer fill it
 * with the beacon command at the lowest configured PLCP rate, sends it
 * to the uCode, and returns the buffer to the pool.  Returns -ENOMEM
 * when no frame buffer is available, otherwise the result of
 * il_send_cmd_pdu().
 */
static int
il3945_send_beacon_cmd(struct il_priv *il)
{
	struct il3945_frame *frame;
	unsigned int frame_size;
	int rc;
	u8 rate;

	frame = il3945_get_free_frame(il);

	if (!frame) {
		IL_ERR("Could not obtain free frame buffer for beacon "
		       "command.\n");
		return -ENOMEM;
	}

	rate = il_get_lowest_plcp(il, &il->ctx);

	frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);

	rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);

	il3945_free_frame(il, frame);

	return rc;
}
354
355static void
356il3945_unset_hw_params(struct il_priv *il)
357{
358 if (il->_3945.shared_virt)
359 dma_free_coherent(&il->pci_dev->dev,
360 sizeof(struct il3945_shared),
361 il->_3945.shared_virt, il->_3945.shared_phys);
362}
363
/*
 * il3945_build_tx_cmd_hwcrypto - fill the Tx command's security fields
 *
 * Copies the per-station key material cached in il->stations[sta_id]
 * into the Tx command and sets sec_ctl for CCMP or WEP.  The TKIP case
 * is intentionally left unprogrammed here.
 */
static void
il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_device_cmd *cmd,
			     struct sk_buff *skb_frag, int sta_id)
{
	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
	struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;

	tx_cmd->sec_ctl = 0;

	switch (keyinfo->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		/* Intentionally empty: no hardware TKIP on this device. */
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    TX_CMD_SEC_WEP | (info->control.hw_key->
				      hw_key_idx & TX_CMD_SEC_MSK) <<
		    TX_CMD_SEC_SHIFT;

		/* WEP key material starts at offset 3 in the key field. */
		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     info->control.hw_key->hw_key_idx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
		break;
	}
}
404
/*
 * il3945_build_tx_cmd_basic - fill generic fields of the C_TX command
 *
 * Sets the ACK/sequence-control/TSF flags, TID, RTS-CTS protection and
 * power-management frame timeout for one outgoing frame.  Rate and
 * crypto fields are filled by separate helpers.
 */
static void
il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_hdr *hdr, u8 std_id)
{
	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
	__le32 tx_flags = tx_cmd->tx_flags;
	__le16 fc = hdr->frame_control;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* Only the first fragment (fragment number 0) of a probe
		 * response carries the TSF flag. */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	il_tx_cmd_protection(il, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* (re)assoc requests get a longer PM frame timeout. */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
458
459/*
460 * start C_TX command process
461 */
462static int
463il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
464{
465 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
466 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
467 struct il3945_tx_cmd *tx_cmd;
468 struct il_tx_queue *txq = NULL;
469 struct il_queue *q = NULL;
470 struct il_device_cmd *out_cmd;
471 struct il_cmd_meta *out_meta;
472 dma_addr_t phys_addr;
473 dma_addr_t txcmd_phys;
474 int txq_id = skb_get_queue_mapping(skb);
475 u16 len, idx, hdr_len;
476 u8 id;
477 u8 unicast;
478 u8 sta_id;
479 u8 tid = 0;
480 __le16 fc;
481 u8 wait_write_ptr = 0;
482 unsigned long flags;
483
484 spin_lock_irqsave(&il->lock, flags);
485 if (il_is_rfkill(il)) {
486 D_DROP("Dropping - RF KILL\n");
487 goto drop_unlock;
488 }
489
490 if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
491 IL_INVALID_RATE) {
492 IL_ERR("ERROR: No TX rate available.\n");
493 goto drop_unlock;
494 }
495
496 unicast = !is_multicast_ether_addr(hdr->addr1);
497 id = 0;
498
499 fc = hdr->frame_control;
500
501#ifdef CONFIG_IWLEGACY_DEBUG
502 if (ieee80211_is_auth(fc))
503 D_TX("Sending AUTH frame\n");
504 else if (ieee80211_is_assoc_req(fc))
505 D_TX("Sending ASSOC frame\n");
506 else if (ieee80211_is_reassoc_req(fc))
507 D_TX("Sending REASSOC frame\n");
508#endif
509
510 spin_unlock_irqrestore(&il->lock, flags);
511
512 hdr_len = ieee80211_hdrlen(fc);
513
514 /* Find idx into station table for destination station */
515 sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
516 if (sta_id == IL_INVALID_STATION) {
517 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
518 goto drop;
519 }
520
521 D_RATE("station Id %d\n", sta_id);
522
523 if (ieee80211_is_data_qos(fc)) {
524 u8 *qc = ieee80211_get_qos_ctl(hdr);
525 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
526 if (unlikely(tid >= MAX_TID_COUNT))
527 goto drop;
528 }
529
530 /* Descriptor for chosen Tx queue */
531 txq = &il->txq[txq_id];
532 q = &txq->q;
533
534 if ((il_queue_space(q) < q->high_mark))
535 goto drop;
536
537 spin_lock_irqsave(&il->lock, flags);
538
539 idx = il_get_cmd_idx(q, q->write_ptr, 0);
540
541 /* Set up driver data for this TFD */
542 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
543 txq->txb[q->write_ptr].skb = skb;
544 txq->txb[q->write_ptr].ctx = &il->ctx;
545
546 /* Init first empty entry in queue's array of Tx/cmd buffers */
547 out_cmd = txq->cmd[idx];
548 out_meta = &txq->meta[idx];
549 tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
550 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
551 memset(tx_cmd, 0, sizeof(*tx_cmd));
552
553 /*
554 * Set up the Tx-command (not MAC!) header.
555 * Store the chosen Tx queue and TFD idx within the sequence field;
556 * after Tx, uCode's Tx response will return this value so driver can
557 * locate the frame within the tx queue and do post-tx processing.
558 */
559 out_cmd->hdr.cmd = C_TX;
560 out_cmd->hdr.sequence =
561 cpu_to_le16((u16)
562 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
563
564 /* Copy MAC header from skb into command buffer */
565 memcpy(tx_cmd->hdr, hdr, hdr_len);
566
567 if (info->control.hw_key)
568 il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
569
570 /* TODO need this for burst mode later on */
571 il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
572
573 /* set is_hcca to 0; it probably will never be implemented */
574 il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id, 0);
575
576 /* Total # bytes to be transmitted */
577 len = (u16) skb->len;
578 tx_cmd->len = cpu_to_le16(len);
579
580 il_dbg_log_tx_data_frame(il, len, hdr);
581 il_update_stats(il, true, fc, len);
582 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
583 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
584
585 if (!ieee80211_has_morefrags(hdr->frame_control)) {
586 txq->need_update = 1;
587 } else {
588 wait_write_ptr = 1;
589 txq->need_update = 0;
590 }
591
592 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
593 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
594 il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
595 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
596 ieee80211_hdrlen(fc));
597
598 /*
599 * Use the first empty entry in this queue's command buffer array
600 * to contain the Tx command and MAC header concatenated together
601 * (payload data will be in another buffer).
602 * Size of this varies, due to varying MAC header length.
603 * If end is not dword aligned, we'll have 2 extra bytes at the end
604 * of the MAC header (device reads on dword boundaries).
605 * We'll tell device about this padding later.
606 */
607 len =
608 sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
609 hdr_len;
610 len = (len + 3) & ~3;
611
612 /* Physical address of this Tx command's header (not MAC header!),
613 * within command buffer array. */
614 txcmd_phys =
615 pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
616 /* we do not map meta data ... so we can safely access address to
617 * provide to unmap command*/
618 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
619 dma_unmap_len_set(out_meta, len, len);
620
621 /* Add buffer containing Tx command and MAC(!) header to TFD's
622 * first entry */
623 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1,
624 0);
625
626 /* Set up TFD's 2nd entry to point directly to remainder of skb,
627 * if any (802.11 null frames have no payload). */
628 len = skb->len - hdr_len;
629 if (len) {
630 phys_addr =
631 pci_map_single(il->pci_dev, skb->data + hdr_len, len,
632 PCI_DMA_TODEVICE);
633 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
634 len, 0, U32_PAD(len));
635 }
636
637 /* Tell device the write idx *just past* this latest filled TFD */
638 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
639 il_txq_update_write_ptr(il, txq);
640 spin_unlock_irqrestore(&il->lock, flags);
641
642 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
643 if (wait_write_ptr) {
644 spin_lock_irqsave(&il->lock, flags);
645 txq->need_update = 1;
646 il_txq_update_write_ptr(il, txq);
647 spin_unlock_irqrestore(&il->lock, flags);
648 }
649
650 il_stop_queue(il, txq);
651 }
652
653 return 0;
654
655drop_unlock:
656 spin_unlock_irqrestore(&il->lock, flags);
657drop:
658 return -1;
659}
660
/*
 * il3945_get_measurement - issue a C_SPECTRUM_MEASUREMENT command
 *
 * Builds a one-channel spectrum-measurement request and sends it
 * synchronously, then decodes the response status.  Returns 0 when the
 * uCode accepted the measurement, -EAGAIN when it refused, -EIO on a
 * failed command, or the il_send_cmd_sync() error code.
 * NOTE(review): add_time truncates the 64-bit start_time to u32 when
 * not associated -- appears intentional (beacon-time units); confirm.
 */
static int
il3945_get_measurement(struct il_priv *il,
		       struct ieee80211_measurement_params *params, u8 type)
{
	struct il_spectrum_cmd spectrum;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SPECTRUM_MEASUREMENT,
		.data = (void *)&spectrum,
		.flags = CMD_WANT_SKB,
	};
	u32 add_time = le64_to_cpu(params->start_time);
	int rc;
	int spectrum_resp_status;
	int duration = le16_to_cpu(params->duration);
	struct il_rxon_context *ctx = &il->ctx;

	/* When associated, express the start time in beacon intervals
	 * relative to the last TSF we saw. */
	if (il_is_associated(il))
		add_time =
		    il_usecs_to_beacons(il,
					le64_to_cpu(params->start_time) -
					il->_3945.last_tsf,
					le16_to_cpu(ctx->timing.
						    beacon_interval));

	memset(&spectrum, 0, sizeof(spectrum));

	spectrum.channel_count = cpu_to_le16(1);
	spectrum.flags =
	    RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
	spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
	cmd.len = sizeof(spectrum);
	spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));

	if (il_is_associated(il))
		spectrum.start_time =
		    il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
				       le16_to_cpu(ctx->timing.
						   beacon_interval));
	else
		spectrum.start_time = 0;

	spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
	spectrum.channels[0].channel = params->channel;
	spectrum.channels[0].type = type;
	if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
		spectrum.flags |=
		    RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
		    RXON_FLG_TGG_PROTECT_MSK;

	rc = il_send_cmd_sync(il, &cmd);
	if (rc)
		return rc;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from N_RX_ON_ASSOC command\n");
		rc = -EIO;
	}

	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
	switch (spectrum_resp_status) {
	case 0:		/* Command will be handled */
		if (pkt->u.spectrum.id != 0xff) {
			D_INFO("Replaced existing measurement: %d\n",
			       pkt->u.spectrum.id);
			il->measurement_status &= ~MEASUREMENT_READY;
		}
		il->measurement_status |= MEASUREMENT_ACTIVE;
		rc = 0;
		break;

	case 1:		/* Command will not be handled */
		rc = -EAGAIN;
		break;
	}

	il_free_pages(il, cmd.reply_page);

	return rc;
}
742
/*
 * il3945_hdl_alive - handle the uCode ALIVE notification
 *
 * Caches the alive response (init vs. runtime image, distinguished by
 * ver_subtype) and schedules the matching start work item after a
 * short delay.  Runtime alive additionally disables uCode event logging
 * via il3945_disable_events().
 */
static void
il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
	       palive->is_valid, palive->ver_type, palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		D_INFO("Initialization Alive received.\n");
		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->init_alive_start;
	} else {
		D_INFO("Runtime Alive received.\n");
		memcpy(&il->card_alive, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->alive_start;
		il3945_disable_events(il);
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
	else
		IL_WARN("uCode did not respond OK.\n");
}
775
/*
 * il3945_hdl_add_sta - handle C_ADD_STA response from the uCode
 *
 * Only logs the returned status byte.  'pkt' is declared inside the
 * CONFIG_IWLEGACY_DEBUG guard -- presumably because D_RX() discards
 * its arguments in non-debug builds, which would otherwise leave the
 * variable unused; confirm against the D_RX definition.
 */
static void
il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
#endif

	D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
}
785
/*
 * il3945_hdl_beacon - handle N_BEACON notification
 *
 * Records the IBSS manager status reported by the uCode; the remaining
 * beacon-status fields are dumped in debug builds only.
 */
static void
il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = beacon->beacon_notify_hdr.rate;

	D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif

	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

}
804
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void
il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	unsigned long status = il->status;	/* snapshot for change detection below */

	IL_WARN("Card state received: HW:%s SW:%s\n",
		(flags & HW_CARD_DISABLED) ? "Kill" : "On",
		(flags & SW_CARD_DISABLED) ? "Kill" : "On");

	/* NOTE(review): per the CSR bit name this blocks further driver
	 * commands to the uCode during the state change -- confirm. */
	_il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RF_KILL_HW, &il->status);
	else
		clear_bit(S_RF_KILL_HW, &il->status);

	il_scan_cancel(il);

	/* Notify rfkill only when the HW kill state actually changed;
	 * otherwise wake anyone waiting on a blocked command. */
	if ((test_bit(S_RF_KILL_HW, &status) !=
	     test_bit(S_RF_KILL_HW, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RF_KILL_HW, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
834
/**
 * il3945_setup_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void
il3945_setup_handlers(struct il_priv *il)
{
	/* 3945-specific notification handlers */
	il->handlers[N_ALIVE] = il3945_hdl_alive;
	il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
	/* handlers shared with the common iwlegacy code */
	il->handlers[N_ERROR] = il_hdl_error;
	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
	il->handlers[N_BEACON] = il3945_hdl_beacon;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * stats request from the host as well as for the periodic
	 * stats notifications (after received beacons) from the uCode.
	 */
	il->handlers[C_STATS] = il3945_hdl_c_stats;
	il->handlers[N_STATS] = il3945_hdl_stats;

	il_setup_rx_scan_handlers(il);
	il->handlers[N_CARD_STATE] = il3945_hdl_card_state;

	/* Set up hardware specific Rx handlers */
	il3945_hw_handler_setup(il);
}
870
871/************************** RX-FUNCTIONS ****************************/
872/*
873 * Rx theory of operation
874 *
875 * The host allocates 32 DMA target addresses and passes the host address
876 * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
877 * 0 to 31
878 *
879 * Rx Queue Indexes
880 * The host/firmware share two idx registers for managing the Rx buffers.
881 *
882 * The READ idx maps to the first position that the firmware may be writing
883 * to -- the driver can read up to (but not including) this position and get
884 * good data.
885 * The READ idx is managed by the firmware once the card is enabled.
886 *
887 * The WRITE idx maps to the last position the driver has read from -- the
888 * position preceding WRITE is the last slot the firmware can place a packet.
889 *
890 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
891 * WRITE = READ.
892 *
893 * During initialization, the host sets up the READ queue position to the first
894 * IDX position, and WRITE to the last (READ - 1 wrapped)
895 *
896 * When the firmware places a packet in a buffer, it will advance the READ idx
897 * and fire the RX interrupt. The driver can then query the READ idx and
898 * process as many packets as possible, moving the WRITE idx forward as it
899 * resets the Rx queue buffers with new memory.
900 *
901 * The management in the driver is as follows:
902 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
903 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
904 * to replenish the iwl->rxq->rx_free.
905 * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
906 * iwl->rxq is replenished and the READ IDX is updated (updating the
907 * 'processed' and 'read' driver idxes as well)
908 * + A received packet is processed and handed to the kernel network stack,
909 * detached from the iwl->rxq. The driver 'processed' idx is updated.
910 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
911 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
912 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
913 * were enough free buffers and RX_STALLED is set it is cleared.
914 *
915 *
916 * Driver sequence:
917 *
918 * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
919 * il3945_rx_queue_restock
920 * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
921 * queue, updates firmware pointers, and updates
922 * the WRITE idx. If insufficient rx_free buffers
923 * are available, schedules il3945_rx_replenish
924 *
925 * -- enable interrupts --
926 * ISR - il3945_rx() Detach il_rx_bufs from pool up to the
927 * READ IDX, detaching the SKB from the pool.
928 * Moves the packet buffer from queue to rx_used.
929 * Calls il3945_rx_queue_restock to refill any empty
930 * slots.
931 * ...
932 *
933 */
934
/**
 * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 *
 * On 3945 the RBD entry is simply the 32-bit DMA address of the buffer,
 * little-endian; no shifting or packing is applied here.
 */
static inline __le32
il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
{
	return cpu_to_le32((u32) dma_addr);
}
943
944/**
945 * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
946 *
947 * If there are slots in the RX queue that need to be restocked,
948 * and we have free pre-allocated buffers, fill the ranks as much
949 * as we can, pulling from rx_free.
950 *
951 * This moves the 'write' idx forward to catch up with 'processed', and
952 * also updates the memory address in the firmware to reference the new
953 * target buffer.
954 */
955static void
956il3945_rx_queue_restock(struct il_priv *il)
957{
958 struct il_rx_queue *rxq = &il->rxq;
959 struct list_head *element;
960 struct il_rx_buf *rxb;
961 unsigned long flags;
962 int write;
963
964 spin_lock_irqsave(&rxq->lock, flags);
965 write = rxq->write & ~0x7;
966 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
967 /* Get next free Rx buffer, remove from free list */
968 element = rxq->rx_free.next;
969 rxb = list_entry(element, struct il_rx_buf, list);
970 list_del(element);
971
972 /* Point to Rx buffer via next RBD in circular buffer */
973 rxq->bd[rxq->write] =
974 il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
975 rxq->queue[rxq->write] = rxb;
976 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
977 rxq->free_count--;
978 }
979 spin_unlock_irqrestore(&rxq->lock, flags);
980 /* If the pre-allocated buffer pool is dropping low, schedule to
981 * refill it */
982 if (rxq->free_count <= RX_LOW_WATERMARK)
983 queue_work(il->workqueue, &il->rx_replenish);
984
985 /* If we've added more space for the firmware to place data, tell it.
986 * Increment device's write pointer in multiples of 8. */
987 if (rxq->write_actual != (rxq->write & ~0x7) ||
988 abs(rxq->write - rxq->read) > 7) {
989 spin_lock_irqsave(&rxq->lock, flags);
990 rxq->need_update = 1;
991 spin_unlock_irqrestore(&rxq->lock, flags);
992 il_rx_queue_update_write_ptr(il, rxq);
993 }
994}
995
/**
 * il3945_rx_allocate - allocate pages for rx_used buffers, move to rx_free
 *
 * For each buffer on rx_used, allocate a receive page with @priority,
 * DMA-map it and move the buffer onto rx_free.  (The hardware queue is
 * restocked separately by il3945_rx_queue_restock; the previous header
 * comment named il3945_rx_replenish by mistake.)
 * This is called as a scheduled work item (except for during initialization)
 */
static void
il3945_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		/* Stop once rx_used is exhausted; check it under the lock. */
		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* Suppress allocation warnings while the pool is healthy. */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("Failed to allocate SKB buffer.\n");
			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to allocate SKB buffer with %0x."
				       "Only %u free buffers remaining.\n",
				       priority, rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			break;
		}

		spin_lock_irqsave(&rxq->lock, flags);
		/* rx_used may have been drained while the lock was dropped
		 * for the allocation; if so, give the page back. */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);
		spin_unlock_irqrestore(&rxq->lock, flags);

		rxb->page = page;
		/* Get physical address of RB/SKB */
		rxb->page_dma =
		    pci_map_page(il->pci_dev, page, 0,
				 PAGE_SIZE << il->hw_params.rx_page_order,
				 PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
1072
1073void
1074il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
1075{
1076 unsigned long flags;
1077 int i;
1078 spin_lock_irqsave(&rxq->lock, flags);
1079 INIT_LIST_HEAD(&rxq->rx_free);
1080 INIT_LIST_HEAD(&rxq->rx_used);
1081 /* Fill the rx_used queue with _all_ of the Rx buffers */
1082 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1083 /* In the reset function, these buffers may have been allocated
1084 * to an SKB, so we need to unmap and free potential storage */
1085 if (rxq->pool[i].page != NULL) {
1086 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
1087 PAGE_SIZE << il->hw_params.rx_page_order,
1088 PCI_DMA_FROMDEVICE);
1089 __il_free_pages(il, rxq->pool[i].page);
1090 rxq->pool[i].page = NULL;
1091 }
1092 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1093 }
1094
1095 /* Set us so that we have processed and used all buffers, but have
1096 * not restocked the Rx queue with fresh buffers */
1097 rxq->read = rxq->write = 0;
1098 rxq->write_actual = 0;
1099 rxq->free_count = 0;
1100 spin_unlock_irqrestore(&rxq->lock, flags);
1101}
1102
/* Work-queue callback: refill rx_free (may sleep), then restock the
 * hardware ring under the driver lock. */
void
il3945_rx_replenish(void *data)
{
	struct il_priv *il = data;
	unsigned long flags;

	/* Process context, so GFP_KERNEL allocations are allowed to sleep */
	il3945_rx_allocate(il, GFP_KERNEL);

	spin_lock_irqsave(&il->lock, flags);
	il3945_rx_queue_restock(il);
	spin_unlock_irqrestore(&il->lock, flags);
}
1115
/* Atomic-context variant of il3945_rx_replenish(): refill and restock
 * without sleeping (used directly from the Rx handling path). */
static void
il3945_rx_replenish_now(struct il_priv *il)
{
	il3945_rx_allocate(il, GFP_ATOMIC);

	il3945_rx_queue_restock(il);
}
1123
/* Assumes that the page field of the buffers in 'pool' is kept accurate.
 * If a page has been detached, the pool entry needs to have its page set
 * to NULL.  This free routine walks the list of pool entries and, where
 * page is non-NULL, unmaps and frees it.
 */
1129static void
1130il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
1131{
1132 int i;
1133 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1134 if (rxq->pool[i].page != NULL) {
1135 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
1136 PAGE_SIZE << il->hw_params.rx_page_order,
1137 PCI_DMA_FROMDEVICE);
1138 __il_free_pages(il, rxq->pool[i].page);
1139 rxq->pool[i].page = NULL;
1140 }
1141 }
1142
1143 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1144 rxq->bd_dma);
1145 dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
1146 rxq->rb_stts, rxq->rb_stts_dma);
1147 rxq->bd = NULL;
1148 rxq->rb_stts = NULL;
1149}
1150
1151/* Convert linear signal-to-noise ratio into dB */
1152static u8 ratio2dB[100] = {
1153/* 0 1 2 3 4 5 6 7 8 9 */
1154 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
1155 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
1156 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
1157 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
1158 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
1159 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
1160 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
1161 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
1162 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
1163 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
1164};
1165
1166/* Calculates a relative dB value from a ratio of linear
1167 * (i.e. not dB) signal levels.
1168 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
1169int
1170il3945_calc_db_from_ratio(int sig_ratio)
1171{
1172 /* 1000:1 or higher just report as 60 dB */
1173 if (sig_ratio >= 1000)
1174 return 60;
1175
1176 /* 100:1 or higher, divide by 10 and use table,
1177 * add 20 dB to make up for divide by 10 */
1178 if (sig_ratio >= 100)
1179 return 20 + (int)ratio2dB[sig_ratio / 10];
1180
1181 /* We shouldn't see this */
1182 if (sig_ratio < 1)
1183 return 0;
1184
1185 /* Use table for ratios 1:1 - 99:1 */
1186 return (int)ratio2dB[sig_ratio];
1187}
1188
1189/**
1190 * il3945_rx_handle - Main entry function for receiving responses from uCode
1191 *
1192 * Uses the il->handlers callback function array to invoke
1193 * the appropriate handlers, including command responses,
1194 * frame-received notifications, and other notifications.
1195 */
static void
il3945_rx_handle(struct il_priv *il)
{
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	/* Start at 8 so that, when fill_rx is set, the very first iteration
	 * already triggers a replenish (count++ makes it >= 8). */
	u32 count = 8;
	int total_empty = 0;

	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* More than half the ring drained: restock eagerly while looping */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;
	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		D_RX("r = %d, i = %d\n", r, i);

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		/* Hand buffer ownership back to the CPU before reading it */
		pci_unmap_page(il->pci_dev, rxb->page_dma,
			       PAGE_SIZE << il->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		/* NOTE(review): len is computed here but not used further */
		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
		len += sizeof(u32);	/* account for status word */

		/* Reclaim a command buffer only if this packet is a response
		 * to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 * there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 * but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
		    pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;

		/* Based on type of command response or notification,
		 * handle those that need handling via function in
		 * handlers table. See il3945_setup_handlers() */
		if (il->handlers[pkt->hdr.cmd]) {
			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			il->isr_stats.handlers[pkt->hdr.cmd]++;
			il->handlers[pkt->hdr.cmd] (il, rxb);
		} else {
			/* No handling needed */
			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking il_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				il_tx_cmd_complete(il, rxb);
			else
				IL_WARN("Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			/* Re-map for the device and recycle onto rx_free */
			rxb->page_dma =
			    pci_map_page(il->pci_dev, rxb->page, 0,
					 PAGE_SIZE << il->hw_params.
					 rx_page_order, PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				il3945_rx_replenish_now(il);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		il3945_rx_replenish_now(il);
	else
		il3945_rx_queue_restock(il);
}
1321
/* call this function to flush any scheduled tasklet */
static inline void
il3945_synchronize_irq(struct il_priv *il)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(il->pci_dev->irq);
	/* the ISR may have scheduled the tasklet meanwhile: kill it last */
	tasklet_kill(&il->irq_tasklet);
}
1330
/* Map a firmware error-log descriptor code (1..6) to a printable name;
 * anything outside that range reports as "UNKNOWN". */
static const char *
il3945_desc_lookup(int i)
{
	static const char *const names[] = {
		"FAIL",
		"BAD_PARAM",
		"BAD_CHECKSUM",
		"NMI_INTERRUPT",
		"SYSASSERT",
		"FATAL_ERROR",
	};

	if (i >= 1 && i <= 6)
		return names[i - 1];

	return "UNKNOWN";
}
1351
/* Layout of the on-device error log: one u32 count, then 7-u32 records */
#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

/* Dump the uCode error-event table from device memory to the kernel log. */
void
il3945_dump_nic_error_log(struct il_priv *il)
{
	u32 i;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;

	/* Table address was reported by the firmware in the ALIVE response */
	base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il3945_hw_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X\n", base);
		return;
	}

	/* First word of the table is the number of records */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	IL_ERR("Desc       Time       asrtPC  blink2 "
	       "ilink1  nmiPC   Line\n");
	/* Read each 7-word record; fields are at fixed u32 offsets */
	for (i = ERROR_START_OFFSET;
	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
	     i += ERROR_ELEM_SIZE) {
		desc = il_read_targ_mem(il, base + i);
		time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
		blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
		blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
		ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
		ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
		data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));

		IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
		       il3945_desc_lookup(desc), desc, time, blink1, blink2,
		       ilink1, ilink2, data1);
	}
}
1394
/* Tasklet (softirq context): service all interrupt causes latched by the
 * ISR, then re-enable interrupts if they are still supposed to be on. */
static void
il3945_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR39_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR39_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Early return on fatal HW error: interrupts stay disabled
		 * until the error handler restarts the device. */
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		/* Push any write pointers queued while the device slept */
		il_rx_queue_update_write_ptr(il, &il->rxq);
		il_txq_update_write_ptr(il, &il->txq[0]);
		il_txq_update_write_ptr(il, &il->txq[1]);
		il_txq_update_write_ptr(il, &il->txq[2]);
		il_txq_update_write_ptr(il, &il->txq[3]);
		il_txq_update_write_ptr(il, &il->txq[4]);
		il_txq_update_write_ptr(il, &il->txq[5]);

		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il3945_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("Tx interrupt\n");
		il->isr_stats.tx++;

		/* Ack the Tx DMA channel and release the service credit */
		_il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
		il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
		handled |= CSR_INT_BIT_FH_TX;
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~il->inta_mask) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN("   with inta_fh = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
1540
/* Build the per-channel entries of a 3945 scan command for one band.
 * Fills @scan_ch with dwell times, active/passive type, direct-probe
 * bits and default Tx power for each valid channel of the current scan
 * request.  Returns the number of entries written. */
static int
il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
			     u8 is_active, u8 n_probes,
			     struct il3945_scan_channel *scan_ch,
			     struct ieee80211_vif *vif)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* Passive dwell must exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		/* Only channels of the requested band */
		if (chan->band != band)
			continue;

		scan_ch->channel = chan->hw_value;

		ch_info = il_get_channel_info(il, band, scan_ch->channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       scan_ch->channel);
			continue;
		}

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* If passive , set up for auto-switch
		 *  and use long active_dwell time.
		 */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
			scan_ch->type = 0;	/* passive */
			/* uCode API v1 needs active_dwell < passive_dwell
			 * for the auto-switch to work */
			if (IL_UCODE_API(il->ucode_ver) == 1)
				scan_ch->active_dwell =
				    cpu_to_le16(passive_dwell - 1);
		} else {
			scan_ch->type = 1;	/* active */
		}

		/* Set direct probe bits. These may be used both for active
		 * scan channels (probes gets sent right away),
		 * or for passive channels (probes get se sent only after
		 * hearing clear Rx packet).*/
		if (IL_UCODE_API(il->ucode_ver) >= 2) {
			if (n_probes)
				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
		} else {
			/* uCode v1 does not allow setting direct probe bits on
			 * passive channel. */
			if ((scan_ch->type & 1) && n_probes)
				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
		}

		/* Set txpower levels to defaults */
		scan_ch->tpc.dsp_atten = 110;
		/* scan_pwr_info->tpc.dsp_atten; */

		/*scan_pwr_info->tpc.tx_gain; */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else {
			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
			 * power level:
			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
			 */
		}

		D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
		       (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->type & 1) ? active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
1634
1635static void
1636il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
1637{
1638 int i;
1639
1640 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
1641 rates[i].bitrate = il3945_rates[i].ieee * 5;
1642 rates[i].hw_value = i; /* Rate scaling will work on idxes */
1643 rates[i].hw_value_short = i;
1644 rates[i].flags = 0;
1645 if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
1646 /*
1647 * If CCK != 1M then set short preamble rate flag.
1648 */
1649 rates[i].flags |=
1650 (il3945_rates[i].plcp ==
1651 10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1652 }
1653 }
1654}
1655
1656/******************************************************************************
1657 *
1658 * uCode download functions
1659 *
1660 ******************************************************************************/
1661
/* Free every DMA-coherent buffer holding a uCode image (runtime code,
 * data + its backup copy, init code/data, bootstrap). */
static void
il3945_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
1672
1673/**
1674 * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
1675 * looking at all data.
1676 */
1677static int
1678il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
1679{
1680 u32 val;
1681 u32 save_len = len;
1682 int rc = 0;
1683 u32 errcnt;
1684
1685 D_INFO("ucode inst image size is %u\n", len);
1686
1687 il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
1688
1689 errcnt = 0;
1690 for (; len > 0; len -= sizeof(u32), image++) {
1691 /* read data comes through single port, auto-incr addr */
1692 /* NOTE: Use the debugless read so we don't flood kernel log
1693 * if IL_DL_IO is set */
1694 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
1695 if (val != le32_to_cpu(*image)) {
1696 IL_ERR("uCode INST section is invalid at "
1697 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1698 save_len - len, val, le32_to_cpu(*image));
1699 rc = -EIO;
1700 errcnt++;
1701 if (errcnt >= 20)
1702 break;
1703 }
1704 }
1705
1706 if (!errcnt)
1707 D_INFO("ucode image in INSTRUCTION memory is good\n");
1708
1709 return rc;
1710}
1711
1712/**
1713 * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
1714 * using sample data 100 bytes apart. If these sample points are good,
1715 * it's a pretty good bet that everything between them is good, too.
1716 */
1717static int
1718il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
1719{
1720 u32 val;
1721 int rc = 0;
1722 u32 errcnt = 0;
1723 u32 i;
1724
1725 D_INFO("ucode inst image size is %u\n", len);
1726
1727 for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
1728 /* read data comes through single port, auto-incr addr */
1729 /* NOTE: Use the debugless read so we don't flood kernel log
1730 * if IL_DL_IO is set */
1731 il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
1732 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
1733 if (val != le32_to_cpu(*image)) {
1734#if 0 /* Enable this if you want to see details */
1735 IL_ERR("uCode INST section is invalid at "
1736 "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
1737 *image);
1738#endif
1739 rc = -EIO;
1740 errcnt++;
1741 if (errcnt >= 3)
1742 break;
1743 }
1744 }
1745
1746 return rc;
1747}
1748
1749/**
1750 * il3945_verify_ucode - determine which instruction image is in SRAM,
1751 * and verify its contents
1752 */
1753static int
1754il3945_verify_ucode(struct il_priv *il)
1755{
1756 __le32 *image;
1757 u32 len;
1758 int rc = 0;
1759
1760 /* Try bootstrap */
1761 image = (__le32 *) il->ucode_boot.v_addr;
1762 len = il->ucode_boot.len;
1763 rc = il3945_verify_inst_sparse(il, image, len);
1764 if (rc == 0) {
1765 D_INFO("Bootstrap uCode is good in inst SRAM\n");
1766 return 0;
1767 }
1768
1769 /* Try initialize */
1770 image = (__le32 *) il->ucode_init.v_addr;
1771 len = il->ucode_init.len;
1772 rc = il3945_verify_inst_sparse(il, image, len);
1773 if (rc == 0) {
1774 D_INFO("Initialize uCode is good in inst SRAM\n");
1775 return 0;
1776 }
1777
1778 /* Try runtime/protocol */
1779 image = (__le32 *) il->ucode_code.v_addr;
1780 len = il->ucode_code.len;
1781 rc = il3945_verify_inst_sparse(il, image, len);
1782 if (rc == 0) {
1783 D_INFO("Runtime uCode is good in inst SRAM\n");
1784 return 0;
1785 }
1786
1787 IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1788
1789 /* Since nothing seems to match, show first several data entries in
1790 * instruction SRAM, so maybe visual inspection will give a clue.
1791 * Selection of bootstrap image (vs. other images) is arbitrary. */
1792 image = (__le32 *) il->ucode_boot.v_addr;
1793 len = il->ucode_boot.len;
1794 rc = il3945_verify_inst_full(il, image, len);
1795
1796 return rc;
1797}
1798
/* Release the device from reset so the NIC (and BSM) can start running. */
static void
il3945_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
1805
/* Generate a trivial accessor that reads one size field from the v1
 * uCode file header and converts it to CPU byte order. */
#define IL3945_UCODE_GET(item) \
static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
{ \
	return le32_to_cpu(ucode->v1.item); \
}
1811
/* 3945 firmware always uses the v1 file header, which is 24 bytes;
 * @api_ver is accepted for interface symmetry only and is ignored. */
static u32
il3945_ucode_get_header_size(u32 api_ver)
{
	return 24;
}
1817
/* The image payload starts immediately after the v1 header fields. */
static u8 *
il3945_ucode_get_data(const struct il_ucode_header *ucode)
{
	return (u8 *) ucode->v1.data;
}
1823
/* Instantiate the v1-header size accessors (see IL3945_UCODE_GET above) */
IL3945_UCODE_GET(inst_size);
IL3945_UCODE_GET(data_size);
IL3945_UCODE_GET(init_size);
IL3945_UCODE_GET(init_data_size);
IL3945_UCODE_GET(boot_size);
1829
1830/**
1831 * il3945_read_ucode - Read uCode images from disk file.
1832 *
1833 * Copy into buffers for card to fetch via bus-mastering
1834 */
1835static int
1836il3945_read_ucode(struct il_priv *il)
1837{
1838 const struct il_ucode_header *ucode;
1839 int ret = -EINVAL, idx;
1840 const struct firmware *ucode_raw;
1841 /* firmware file name contains uCode/driver compatibility version */
1842 const char *name_pre = il->cfg->fw_name_pre;
1843 const unsigned int api_max = il->cfg->ucode_api_max;
1844 const unsigned int api_min = il->cfg->ucode_api_min;
1845 char buf[25];
1846 u8 *src;
1847 size_t len;
1848 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
1849
1850 /* Ask kernel firmware_class module to get the boot firmware off disk.
1851 * request_firmware() is synchronous, file is in memory on return. */
1852 for (idx = api_max; idx >= api_min; idx--) {
1853 sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
1854 ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
1855 if (ret < 0) {
1856 IL_ERR("%s firmware file req failed: %d\n", buf, ret);
1857 if (ret == -ENOENT)
1858 continue;
1859 else
1860 goto error;
1861 } else {
1862 if (idx < api_max)
1863 IL_ERR("Loaded firmware %s, "
1864 "which is deprecated. "
1865 " Please use API v%u instead.\n", buf,
1866 api_max);
1867 D_INFO("Got firmware '%s' file "
1868 "(%zd bytes) from disk\n", buf, ucode_raw->size);
1869 break;
1870 }
1871 }
1872
1873 if (ret < 0)
1874 goto error;
1875
1876 /* Make sure that we got at least our header! */
1877 if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
1878 IL_ERR("File size way too small!\n");
1879 ret = -EINVAL;
1880 goto err_release;
1881 }
1882
1883 /* Data from ucode file: header followed by uCode images */
1884 ucode = (struct il_ucode_header *)ucode_raw->data;
1885
1886 il->ucode_ver = le32_to_cpu(ucode->ver);
1887 api_ver = IL_UCODE_API(il->ucode_ver);
1888 inst_size = il3945_ucode_get_inst_size(ucode);
1889 data_size = il3945_ucode_get_data_size(ucode);
1890 init_size = il3945_ucode_get_init_size(ucode);
1891 init_data_size = il3945_ucode_get_init_data_size(ucode);
1892 boot_size = il3945_ucode_get_boot_size(ucode);
1893 src = il3945_ucode_get_data(ucode);
1894
1895 /* api_ver should match the api version forming part of the
1896 * firmware filename ... but we don't check for that and only rely
1897 * on the API version read from firmware header from here on forward */
1898
1899 if (api_ver < api_min || api_ver > api_max) {
1900 IL_ERR("Driver unable to support your firmware API. "
1901 "Driver supports v%u, firmware is v%u.\n", api_max,
1902 api_ver);
1903 il->ucode_ver = 0;
1904 ret = -EINVAL;
1905 goto err_release;
1906 }
1907 if (api_ver != api_max)
1908 IL_ERR("Firmware has old API version. Expected %u, "
1909 "got %u. New firmware can be obtained "
1910 "from http://www.intellinuxwireless.org.\n", api_max,
1911 api_ver);
1912
1913 IL_INFO("loaded firmware version %u.%u.%u.%u\n",
1914 IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
1915 IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
1916
1917 snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
1918 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
1919 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
1920 IL_UCODE_SERIAL(il->ucode_ver));
1921
1922 D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
1923 D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
1924 D_INFO("f/w package hdr runtime data size = %u\n", data_size);
1925 D_INFO("f/w package hdr init inst size = %u\n", init_size);
1926 D_INFO("f/w package hdr init data size = %u\n", init_data_size);
1927 D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
1928
1929 /* Verify size of file vs. image size info in file's header */
1930 if (ucode_raw->size !=
1931 il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
1932 init_size + init_data_size + boot_size) {
1933
1934 D_INFO("uCode file size %zd does not match expected size\n",
1935 ucode_raw->size);
1936 ret = -EINVAL;
1937 goto err_release;
1938 }
1939
1940 /* Verify that uCode images will fit in card's SRAM */
1941 if (inst_size > IL39_MAX_INST_SIZE) {
1942 D_INFO("uCode instr len %d too large to fit in\n", inst_size);
1943 ret = -EINVAL;
1944 goto err_release;
1945 }
1946
1947 if (data_size > IL39_MAX_DATA_SIZE) {
1948 D_INFO("uCode data len %d too large to fit in\n", data_size);
1949 ret = -EINVAL;
1950 goto err_release;
1951 }
1952 if (init_size > IL39_MAX_INST_SIZE) {
1953 D_INFO("uCode init instr len %d too large to fit in\n",
1954 init_size);
1955 ret = -EINVAL;
1956 goto err_release;
1957 }
1958 if (init_data_size > IL39_MAX_DATA_SIZE) {
1959 D_INFO("uCode init data len %d too large to fit in\n",
1960 init_data_size);
1961 ret = -EINVAL;
1962 goto err_release;
1963 }
1964 if (boot_size > IL39_MAX_BSM_SIZE) {
1965 D_INFO("uCode boot instr len %d too large to fit in\n",
1966 boot_size);
1967 ret = -EINVAL;
1968 goto err_release;
1969 }
1970
1971 /* Allocate ucode buffers for card's bus-master loading ... */
1972
1973 /* Runtime instructions and 2 copies of data:
1974 * 1) unmodified from disk
1975 * 2) backup cache for save/restore during power-downs */
1976 il->ucode_code.len = inst_size;
1977 il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
1978
1979 il->ucode_data.len = data_size;
1980 il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
1981
1982 il->ucode_data_backup.len = data_size;
1983 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
1984
1985 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
1986 !il->ucode_data_backup.v_addr)
1987 goto err_pci_alloc;
1988
1989 /* Initialization instructions and data */
1990 if (init_size && init_data_size) {
1991 il->ucode_init.len = init_size;
1992 il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
1993
1994 il->ucode_init_data.len = init_data_size;
1995 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
1996
1997 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
1998 goto err_pci_alloc;
1999 }
2000
2001 /* Bootstrap (instructions only, no data) */
2002 if (boot_size) {
2003 il->ucode_boot.len = boot_size;
2004 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
2005
2006 if (!il->ucode_boot.v_addr)
2007 goto err_pci_alloc;
2008 }
2009
2010 /* Copy images into buffers for card's bus-master reads ... */
2011
2012 /* Runtime instructions (first block of data in file) */
2013 len = inst_size;
2014 D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
2015 memcpy(il->ucode_code.v_addr, src, len);
2016 src += len;
2017
2018 D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2019 il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
2020
2021 /* Runtime data (2nd block)
2022 * NOTE: Copy into backup buffer will be done in il3945_up() */
2023 len = data_size;
2024 D_INFO("Copying (but not loading) uCode data len %zd\n", len);
2025 memcpy(il->ucode_data.v_addr, src, len);
2026 memcpy(il->ucode_data_backup.v_addr, src, len);
2027 src += len;
2028
2029 /* Initialization instructions (3rd block) */
2030 if (init_size) {
2031 len = init_size;
2032 D_INFO("Copying (but not loading) init instr len %zd\n", len);
2033 memcpy(il->ucode_init.v_addr, src, len);
2034 src += len;
2035 }
2036
2037 /* Initialization data (4th block) */
2038 if (init_data_size) {
2039 len = init_data_size;
2040 D_INFO("Copying (but not loading) init data len %zd\n", len);
2041 memcpy(il->ucode_init_data.v_addr, src, len);
2042 src += len;
2043 }
2044
2045 /* Bootstrap instructions (5th block) */
2046 len = boot_size;
2047 D_INFO("Copying (but not loading) boot instr len %zd\n", len);
2048 memcpy(il->ucode_boot.v_addr, src, len);
2049
2050 /* We have our copies now, allow OS release its copies */
2051 release_firmware(ucode_raw);
2052 return 0;
2053
2054err_pci_alloc:
2055 IL_ERR("failed to allocate pci memory\n");
2056 ret = -ENOMEM;
2057 il3945_dealloc_ucode_pci(il);
2058
2059err_release:
2060 release_firmware(ucode_raw);
2061
2062error:
2063 return ret;
2064}
2065
2066/**
2067 * il3945_set_ucode_ptrs - Set uCode address location
2068 *
2069 * Tell initialization uCode where to find runtime uCode.
2070 *
2071 * BSM registers initially contain pointers to initialization uCode.
2072 * We need to replace them to load runtime uCode inst and data,
2073 * and to save runtime data when powering down.
2074 */
static int
il3945_set_ucode_ptrs(struct il_priv *il)
{
	dma_addr_t pinst;
	dma_addr_t pdata;

	/* bits 31:0 for 3945 */
	pinst = il->ucode_code.p_addr;
	/* Data pointer deliberately targets the backup copy: the uCode
	 * saves/restores its data SRAM through this buffer across
	 * power-downs (see the data byte-count write below). */
	pdata = il->ucode_data_backup.p_addr;

	/* Tell bootstrap uCode where to find image to load */
	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);

	/* Inst byte count must be last to set up, bit 31 signals uCode
	 * that all new ptr/size info is in place */
	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
		   il->ucode_code.len | BSM_DRAM_INST_LOAD);

	D_INFO("Runtime uCode pointers are set.\n");

	/* Always succeeds; int return kept for symmetry with callers
	 * that check it (il3945_init_alive_start). */
	return 0;
}
2099
2100/**
2101 * il3945_init_alive_start - Called after N_ALIVE notification received
2102 *
2103 * Called after N_ALIVE notification received from "initialize" uCode.
2104 *
2105 * Tell "initialize" uCode to go ahead and load the runtime uCode.
2106 */
2107static void
2108il3945_init_alive_start(struct il_priv *il)
2109{
2110 /* Check alive response for "valid" sign from uCode */
2111 if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
2112 /* We had an error bringing up the hardware, so take it
2113 * all the way back down so we can try again */
2114 D_INFO("Initialize Alive failed.\n");
2115 goto restart;
2116 }
2117
2118 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
2119 * This is a paranoid check, because we would not have gotten the
2120 * "initialize" alive if code weren't properly loaded. */
2121 if (il3945_verify_ucode(il)) {
2122 /* Runtime instruction load was bad;
2123 * take it all the way back down so we can try again */
2124 D_INFO("Bad \"initialize\" uCode load.\n");
2125 goto restart;
2126 }
2127
2128 /* Send pointers to protocol/runtime uCode image ... init code will
2129 * load and launch runtime uCode, which will send us another "Alive"
2130 * notification. */
2131 D_INFO("Initialization Alive received.\n");
2132 if (il3945_set_ucode_ptrs(il)) {
2133 /* Runtime instruction load won't happen;
2134 * take it all the way back down so we can try again */
2135 D_INFO("Couldn't set up uCode pointers.\n");
2136 goto restart;
2137 }
2138 return;
2139
2140restart:
2141 queue_work(il->workqueue, &il->restart);
2142}
2143
2144/**
2145 * il3945_alive_start - called after N_ALIVE notification received
2146 * from protocol/runtime uCode (initialization uCode's
2147 * Alive gets handled by il3945_init_alive_start()).
2148 */
static void
il3945_alive_start(struct il_priv *il)
{
	int thermal_spin = 0;
	u32 rfkill;
	struct il_rxon_context *ctx = &il->ctx;

	D_INFO("Runtime Alive received.\n");

	/* Runtime uCode stamps the alive response with UCODE_VALID_OK only
	 * on a clean boot; otherwise restart the whole bring-up. */
	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded. */
	if (il3945_verify_ucode(il)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	rfkill = il_rd_prph(il, APMG_RFKILL_REG);
	D_INFO("RFKILL status: 0x%x\n", rfkill);

	/* Bit 0 set here corresponds to "radio enabled" given the
	 * clear_bit below -- NOTE(review): confirm against APMG docs. */
	if (rfkill & 0x1) {
		clear_bit(S_RF_KILL_HW, &il->status);
		/* if RFKILL is not on, then wait for thermal
		 * sensor in adapter to kick in */
		/* NOTE(review): this busy-wait has no upper bound; relies
		 * on the sensor always reporting non-zero eventually. */
		while (il3945_hw_get_temperature(il) == 0) {
			thermal_spin++;
			udelay(10);
		}

		if (thermal_spin)
			D_INFO("Thermal calibration took %dus\n",
			       thermal_spin * 10);
	} else
		set_bit(S_RF_KILL_HW, &il->status);

	/* After the ALIVE response, we can send commands to 3945 uCode */
	set_bit(S_ALIVE, &il->status);

	/* Enable watchdog to monitor the driver tx queues */
	il_setup_watchdog(il);

	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK_3945;

	il_power_update_mode(il, true);

	if (il_is_associated(il)) {
		/* Re-assert association in staging and drop it from the
		 * active copy so the commit below re-sends it. */
		struct il3945_rxon_cmd *active_rxon =
		    (struct il3945_rxon_cmd *)(&ctx->active);

		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		il_connection_init_rx_config(il, ctx);
	}

	/* Configure Bluetooth device coexistence support */
	il_send_bt_config(il);

	set_bit(S_READY, &il->status);

	/* Configure the adapter for unassociated operation */
	il3945_commit_rxon(il, ctx);

	il3945_reg_txpower_periodic(il);

	D_INFO("ALIVE processing complete.\n");
	/* Wake il3945_mac_start(), which waits for S_READY. */
	wake_up(&il->wait_command_queue);

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}
2237
2238static void il3945_cancel_deferred_work(struct il_priv *il);
2239
/* Tear the NIC down: stop scans/watchdog, clear stations, disable
 * interrupts, stop DMA and power the device off.  Callers hold il->mutex
 * (see il3945_down() and __il3945_up()'s failure path). */
static void
__il3945_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	/* Remember whether a shutdown was already in progress so we only
	 * clear S_EXIT_PENDING below if we set it ourselves. */
	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&il->watchdog);

	/* Station information will now be cleared in device */
	il_clear_ucode_stations(il, NULL);
	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock any waiting calls */
	wake_up_all(&il->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* stop and reset the on-board processor */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il3945_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If we have not previously called il3945_init() then
	 * clear all bits but the RF Kill bits and return */
	if (!il_is_init(il)) {
		/* Rebuild status from scratch, preserving only the listed
		 * bits (each test_bit result is shifted back into place). */
		il->status =
		    test_bit(S_RF_KILL_HW,
			     &il->
			     status) << S_RF_KILL_HW |
		    test_bit(S_GEO_CONFIGURED,
			     &il->
			     status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	il->status &=
	    test_bit(S_RF_KILL_HW,
		     &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED,
							     &il->
							     status) <<
	    S_GEO_CONFIGURED | test_bit(S_FW_ERROR,
					&il->
					status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	il3945_hw_txq_ctx_stop(il);
	il3945_hw_rxq_stop(il);

	/* Power-down device's busmaster DMA clocks */
	il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Stop the device, and put it in low power state */
	il_apm_stop(il);

exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* clear out any free frames */
	il3945_clear_free_frames(il);
}
2327
/* Shut the device down under il->mutex, then cancel deferred work
 * OUTSIDE the lock -- the work handlers themselves take il->mutex
 * (see il3945_bg_alive_start() etc.), so cancelling while holding it
 * could deadlock. */
static void
il3945_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il3945_down(il);
	mutex_unlock(&il->mutex);

	il3945_cancel_deferred_work(il);
}
2337
2338#define MAX_HW_RESTARTS 5
2339
2340static int
2341il3945_alloc_bcast_station(struct il_priv *il)
2342{
2343 struct il_rxon_context *ctx = &il->ctx;
2344 unsigned long flags;
2345 u8 sta_id;
2346
2347 spin_lock_irqsave(&il->sta_lock, flags);
2348 sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
2349 if (sta_id == IL_INVALID_STATION) {
2350 IL_ERR("Unable to prepare broadcast station\n");
2351 spin_unlock_irqrestore(&il->sta_lock, flags);
2352
2353 return -EINVAL;
2354 }
2355
2356 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
2357 il->stations[sta_id].used |= IL_STA_BCAST;
2358 spin_unlock_irqrestore(&il->sta_lock, flags);
2359
2360 return 0;
2361}
2362
2363static int
2364__il3945_up(struct il_priv *il)
2365{
2366 int rc, i;
2367
2368 rc = il3945_alloc_bcast_station(il);
2369 if (rc)
2370 return rc;
2371
2372 if (test_bit(S_EXIT_PENDING, &il->status)) {
2373 IL_WARN("Exit pending; will not bring the NIC up\n");
2374 return -EIO;
2375 }
2376
2377 if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
2378 IL_ERR("ucode not available for device bring up\n");
2379 return -EIO;
2380 }
2381
2382 /* If platform's RF_KILL switch is NOT set to KILL */
2383 if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2384 clear_bit(S_RF_KILL_HW, &il->status);
2385 else {
2386 set_bit(S_RF_KILL_HW, &il->status);
2387 IL_WARN("Radio disabled by HW RF Kill switch\n");
2388 return -ENODEV;
2389 }
2390
2391 _il_wr(il, CSR_INT, 0xFFFFFFFF);
2392
2393 rc = il3945_hw_nic_init(il);
2394 if (rc) {
2395 IL_ERR("Unable to int nic\n");
2396 return rc;
2397 }
2398
2399 /* make sure rfkill handshake bits are cleared */
2400 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2401 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2402
2403 /* clear (again), then enable host interrupts */
2404 _il_wr(il, CSR_INT, 0xFFFFFFFF);
2405 il_enable_interrupts(il);
2406
2407 /* really make sure rfkill handshake bits are cleared */
2408 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2409 _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2410
2411 /* Copy original ucode data image from disk into backup cache.
2412 * This will be used to initialize the on-board processor's
2413 * data SRAM for a clean start when the runtime program first loads. */
2414 memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
2415 il->ucode_data.len);
2416
2417 /* We return success when we resume from suspend and rf_kill is on. */
2418 if (test_bit(S_RF_KILL_HW, &il->status))
2419 return 0;
2420
2421 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2422
2423 /* load bootstrap state machine,
2424 * load bootstrap program into processor's memory,
2425 * prepare to load the "initialize" uCode */
2426 rc = il->cfg->ops->lib->load_ucode(il);
2427
2428 if (rc) {
2429 IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
2430 continue;
2431 }
2432
2433 /* start card; "initialize" will load runtime ucode */
2434 il3945_nic_start(il);
2435
2436 D_INFO(DRV_NAME " is coming up\n");
2437
2438 return 0;
2439 }
2440
2441 set_bit(S_EXIT_PENDING, &il->status);
2442 __il3945_down(il);
2443 clear_bit(S_EXIT_PENDING, &il->status);
2444
2445 /* tried to restart and config the device for as long as our
2446 * patience could withstand */
2447 IL_ERR("Unable to initialize device after %d attempts.\n", i);
2448 return -EIO;
2449}
2450
2451/*****************************************************************************
2452 *
2453 * Workqueue callbacks
2454 *
2455 *****************************************************************************/
2456
2457static void
2458il3945_bg_init_alive_start(struct work_struct *data)
2459{
2460 struct il_priv *il =
2461 container_of(data, struct il_priv, init_alive_start.work);
2462
2463 mutex_lock(&il->mutex);
2464 if (test_bit(S_EXIT_PENDING, &il->status))
2465 goto out;
2466
2467 il3945_init_alive_start(il);
2468out:
2469 mutex_unlock(&il->mutex);
2470}
2471
2472static void
2473il3945_bg_alive_start(struct work_struct *data)
2474{
2475 struct il_priv *il =
2476 container_of(data, struct il_priv, alive_start.work);
2477
2478 mutex_lock(&il->mutex);
2479 if (test_bit(S_EXIT_PENDING, &il->status))
2480 goto out;
2481
2482 il3945_alive_start(il);
2483out:
2484 mutex_unlock(&il->mutex);
2485}
2486
2487/*
2488 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2489 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2490 * *is* readable even when device has been SW_RESET into low power mode
2491 * (e.g. during RF KILL).
2492 */
2493static void
2494il3945_rfkill_poll(struct work_struct *data)
2495{
2496 struct il_priv *il =
2497 container_of(data, struct il_priv, _3945.rfkill_poll.work);
2498 bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status);
2499 bool new_rfkill =
2500 !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2501
2502 if (new_rfkill != old_rfkill) {
2503 if (new_rfkill)
2504 set_bit(S_RF_KILL_HW, &il->status);
2505 else
2506 clear_bit(S_RF_KILL_HW, &il->status);
2507
2508 wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
2509
2510 D_RF_KILL("RF_KILL bit toggled to %s.\n",
2511 new_rfkill ? "disable radio" : "enable radio");
2512 }
2513
2514 /* Keep this running, even if radio now enabled. This will be
2515 * cancelled in mac_start() if system decides to start again */
2516 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
2517 round_jiffies_relative(2 * HZ));
2518
2519}
2520
/* Build and send a C_SCAN host command from il->scan_request.
 * Caller holds il->mutex (asserted below).  Returns 0 on success or a
 * negative errno; S_SCAN_HW is set while the command is in flight. */
int
il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il3945_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il3945_scan_cmd *scan;
	u8 n_probes = 0;
	enum ieee80211_band band;
	bool is_active = false;
	int ret;
	u16 len;

	lockdep_assert_held(&il->mutex);

	/* Lazily allocate a single reusable scan buffer: command header
	 * plus room for probe IEs and the channel list. */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("Fail to allocate scan memory\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	/* When associated, bound the time spent off-channel so the live
	 * connection is not starved. */
	if (il_is_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		D_INFO("Scanning while associated...\n");

		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;
		/*
		 * suspend time format:
		 *  0-19: beacon interval in usec (time before exec.)
		 * 20-23: 0
		 * 24-31: number of beacons (suspend between channels)
		 */

		extra = (suspend_time / interval) << 24;
		scan_suspend_time =
		    0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));

		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	/* Copy any requested SSIDs into the direct-scan slots; each one
	 * costs an extra probe request per channel (n_probes). */
	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Kicking off passive scan.\n");

	/* We don't build a direct scan probe request; the uCode will do
	 * that based on the direct_mask added to each channel entry */
	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = il->ctx.bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* flags + rate selection */

	switch (il->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		scan->tx_cmd.rate = RATE_1M_PLCP;
		band = IEEE80211_BAND_2GHZ;
		break;
	case IEEE80211_BAND_5GHZ:
		scan->tx_cmd.rate = RATE_6M_PLCP;
		band = IEEE80211_BAND_5GHZ;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scaning is requested but a certain channel
	 * is marked passive, we can do active scanning if we
	 * detect transmissions.
	 */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_DISABLED;

	/* Probe template (with extra IEs from mac80211) goes into the
	 * flexible data area; channel entries follow it at data[len]. */
	len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(len);

	/* select Rx antennas */
	scan->flags |= il3945_get_antenna_flags(il);

	scan->channel_count =
	    il3945_get_channels_for_scan(il, band, is_active, n_probes,
					 (void *)&scan->data[len], vif);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	/* Final command length = header + probe template + channel list. */
	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il3945_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);
	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);
	return ret;
}
2664
2665void
2666il3945_post_scan(struct il_priv *il)
2667{
2668 struct il_rxon_context *ctx = &il->ctx;
2669
2670 /*
2671 * Since setting the RXON may have been deferred while
2672 * performing the scan, fire one off if needed
2673 */
2674 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2675 il3945_commit_rxon(il, ctx);
2676}
2677
/* Restart work: either hand recovery to mac80211 (after a firmware
 * error) or do a full down/up cycle ourselves. */
static void
il3945_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		/* Detach the vif state under the mutex, then let mac80211
		 * drive reconfiguration via ieee80211_restart_hw().
		 * il3945_down() takes il->mutex itself, hence the unlock
		 * before calling it. */
		mutex_lock(&il->mutex);
		il->ctx.vif = NULL;
		il->is_open = 0;
		mutex_unlock(&il->mutex);
		il3945_down(il);
		ieee80211_restart_hw(il->hw);
	} else {
		il3945_down(il);

		/* Re-check for teardown racing with this work item before
		 * bringing the device back up. */
		mutex_lock(&il->mutex);
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il3945_up(il);
		mutex_unlock(&il->mutex);
	}
}
2706
2707static void
2708il3945_bg_rx_replenish(struct work_struct *data)
2709{
2710 struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
2711
2712 mutex_lock(&il->mutex);
2713 if (test_bit(S_EXIT_PENDING, &il->status))
2714 goto out;
2715
2716 il3945_rx_replenish(il);
2717out:
2718 mutex_unlock(&il->mutex);
2719}
2720
/* Program the device for the just-established association: drop ASSOC,
 * send timing, re-commit RXON with ASSOC set, then do per-iftype setup. */
void
il3945_post_associate(struct il_priv *il)
{
	int rc = 0;
	struct ieee80211_conf *conf = NULL;
	struct il_rxon_context *ctx = &il->ctx;

	/* Nothing to do without an interface or before mac_start(). */
	if (!ctx->vif || !il->is_open)
		return;

	D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid,
		ctx->active.bssid_addr);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	il_scan_cancel_timeout(il, 200);

	/* NOTE(review): conf is assigned but not otherwise used here. */
	conf = &il->hw->conf;

	/* RXON timing must be sent while unassociated, so clear ASSOC
	 * and commit first. */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il3945_commit_rxon(il, ctx);

	rc = il_send_rxon_timing(il, ctx);
	if (rc)
		IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");

	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);

	D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid,
		ctx->vif->bss_conf.beacon_int);

	if (ctx->vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	/* Short-slot is a 2.4 GHz-only option. */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (ctx->vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	il3945_commit_rxon(il, ctx);

	switch (ctx->vif->type) {
	case NL80211_IFTYPE_STATION:
		il3945_rate_scale_init(il->hw, IL_AP_ID);
		break;
	case NL80211_IFTYPE_ADHOC:
		il3945_send_beacon_cmd(il);
		break;
	default:
		IL_ERR("%s Should not be called in %d mode\n", __func__,
		       ctx->vif->type);
		break;
	}
}
2782
2783/*****************************************************************************
2784 *
2785 * mac80211 entry point functions
2786 *
2787 *****************************************************************************/
2788
2789#define UCODE_READY_TIMEOUT (2 * HZ)
2790
/* mac80211 start callback: load uCode (first time only), bring the NIC
 * up, and wait for the runtime "alive" (S_READY) before returning. */
static int
il3945_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&il->mutex);

	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
	 * ucode filename and max sizes are card-specific. */

	if (!il->ucode_code.len) {
		ret = il3945_read_ucode(il);
		if (ret) {
			IL_ERR("Could not read microcode: %d\n", ret);
			mutex_unlock(&il->mutex);
			goto out_release_irq;
		}
	}

	ret = __il3945_up(il);

	mutex_unlock(&il->mutex);

	if (ret)
		goto out_release_irq;

	D_INFO("Start UP work.\n");

	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	/* S_READY is set (and this queue woken) by il3945_alive_start(). */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			ret = -ETIMEDOUT;
			goto out_release_irq;
		}
	}

	/* ucode is running and will send rfkill notifications,
	 * no need to poll the killswitch state anymore */
	cancel_delayed_work(&il->_3945.rfkill_poll);

	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;

out_release_irq:
	/* NOTE(review): label name is historical -- no IRQ is released
	 * here, this is just the common failure exit. */
	il->is_open = 0;
	D_MAC80211("leave - failed\n");
	return ret;
}
2850
2851static void
2852il3945_mac_stop(struct ieee80211_hw *hw)
2853{
2854 struct il_priv *il = hw->priv;
2855
2856 D_MAC80211("enter\n");
2857
2858 if (!il->is_open) {
2859 D_MAC80211("leave - skip\n");
2860 return;
2861 }
2862
2863 il->is_open = 0;
2864
2865 il3945_down(il);
2866
2867 flush_workqueue(il->workqueue);
2868
2869 /* start polling the killswitch state again */
2870 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
2871 round_jiffies_relative(2 * HZ));
2872
2873 D_MAC80211("leave\n");
2874}
2875
2876static void
2877il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2878{
2879 struct il_priv *il = hw->priv;
2880
2881 D_MAC80211("enter\n");
2882
2883 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2884 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2885
2886 if (il3945_tx_skb(il, skb))
2887 dev_kfree_skb_any(skb);
2888
2889 D_MAC80211("leave\n");
2890}
2891
/* Configure the device for AP/IBSS beaconing.  The RXON/timing dance
 * (drop ASSOC, send timing, restore ASSOC) runs only on first bring-up;
 * a beacon command is sent unconditionally. */
void
il3945_config_ap(struct il_priv *il)
{
	struct il_rxon_context *ctx = &il->ctx;
	struct ieee80211_vif *vif = ctx->vif;
	int rc = 0;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* The following should be done only at AP bring up */
	if (!(il_is_associated(il))) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		il3945_commit_rxon(il, ctx);

		/* RXON Timing */
		rc = il_send_rxon_timing(il, ctx);
		if (rc)
			IL_WARN("C_RXON_TIMING failed - "
				"Attempting to continue.\n");

		ctx->staging.assoc_id = 0;

		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* Short-slot only applies on the 2.4 GHz band. */
		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		il3945_commit_rxon(il, ctx);
	}
	il3945_send_beacon_cmd(il);
}
2934
/* mac80211 set_key callback: install or remove a hardware crypto key.
 * Unassociated -> "static" (global) key slots; associated -> per-station
 * dynamic keys.  Returns 0, -EINVAL, or -EOPNOTSUPP. */
static int
il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret = 0;
	u8 sta_id = IL_INVALID_STATION;
	u8 static_key;

	D_MAC80211("enter\n");

	/* Module parameter forces software crypto; let mac80211 do it. */
	if (il3945_mod_params.sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/*
	 * To support IBSS RSN, don't program group keys in IBSS, the
	 * hardware will then not attempt to decrypt the frames.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	/* NOTE(review): association state is sampled before taking
	 * il->mutex below -- confirm this cannot race with (dis)assoc. */
	static_key = !il_is_associated(il);

	if (!static_key) {
		sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
		if (sta_id == IL_INVALID_STATION)
			return -EINVAL;
	}

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 100);

	switch (cmd) {
	case SET_KEY:
		if (static_key)
			ret = il3945_set_static_key(il, key);
		else
			ret = il3945_set_dynamic_key(il, key, sta_id);
		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (static_key)
			ret = il3945_remove_static_key(il);
		else
			ret = il3945_clear_sta_key_info(il, sta_id);
		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
2995
/* mac80211 sta_add callback: register the peer in the device's station
 * table and initialize rate scaling for it. */
static int
il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
	int ret;
	/* "is_ap" describes the PEER being added: when we operate as a
	 * station, the peer is our AP -- NOTE(review): confirm this is
	 * the intended reading of the flag in il_add_station_common(). */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	sta_priv->common.sta_id = IL_INVALID_STATION;

	ret =
	    il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il3945_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
3029
/* mac80211 configure_filter callback: translate FIF_* filter flags into
 * RXON filter bits in the staging config (committed later), then report
 * back which flags we actually honor. */
static void
il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct il_rxon_context *ctx = &il->ctx;

/* For each mac80211 filter flag: set the RXON bit when requested,
 * otherwise mark it for clearing. */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	ctx->staging.filter_flags &= ~filter_nand;
	ctx->staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but even if hw is ready, committing here breaks for some reason,
	 * we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&il->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in il_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
3077
3078/*****************************************************************************
3079 *
3080 * sysfs attributes
3081 *
3082 *****************************************************************************/
3083
3084#ifdef CONFIG_IWLEGACY_DEBUG
3085
3086/*
3087 * The following adds a new attribute to the sysfs representation
3088 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
3089 * used for controlling the debug level.
3090 *
3091 * See the level definitions in iwl for details.
3092 *
3093 * The debug_level being managed using sysfs below is a per device debug
3094 * level that is used instead of the global debug level if it (the per
3095 * device debug level) is set.
3096 */
3097static ssize_t
3098il3945_show_debug_level(struct device *d, struct device_attribute *attr,
3099 char *buf)
3100{
3101 struct il_priv *il = dev_get_drvdata(d);
3102 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
3103}
3104
/* sysfs write: set the per-device debug level (hex or decimal). */
static ssize_t
il3945_store_debug_level(struct device *d, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct il_priv *il = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	/* NOTE(review): strict_strtoul() is deprecated in favor of
	 * kstrtoul(); kept as-is here. */
	ret = strict_strtoul(buf, 0, &val);
	if (ret)
		IL_INFO("%s is not in hex or decimal form.\n", buf);
	else {
		il->debug_level = val;
		if (il_alloc_traffic_mem(il))
			IL_ERR("Not enough memory to generate traffic log\n");
	}
	/* NOTE(review): sysfs stores conventionally return count;
	 * strnlen(buf, count) is usually equal but not guaranteed. */
	return strnlen(buf, count);
}
3123
/* sysfs attribute: per-device debug level (root-writable, world-readable). */
static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level,
		   il3945_store_debug_level);
3126
3127#endif /* CONFIG_IWLEGACY_DEBUG */
3128
3129static ssize_t
3130il3945_show_temperature(struct device *d, struct device_attribute *attr,
3131 char *buf)
3132{
3133 struct il_priv *il = dev_get_drvdata(d);
3134
3135 if (!il_is_alive(il))
3136 return -EAGAIN;
3137
3138 return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
3139}
3140
3141static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL);
3142
3143static ssize_t
3144il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
3145{
3146 struct il_priv *il = dev_get_drvdata(d);
3147 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
3148}
3149
3150static ssize_t
3151il3945_store_tx_power(struct device *d, struct device_attribute *attr,
3152 const char *buf, size_t count)
3153{
3154 struct il_priv *il = dev_get_drvdata(d);
3155 char *p = (char *)buf;
3156 u32 val;
3157
3158 val = simple_strtoul(p, &p, 10);
3159 if (p == buf)
3160 IL_INFO(": %s is not in decimal form.\n", buf);
3161 else
3162 il3945_hw_reg_set_txpower(il, val);
3163
3164 return count;
3165}
3166
3167static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power,
3168 il3945_store_tx_power);
3169
3170static ssize_t
3171il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
3172{
3173 struct il_priv *il = dev_get_drvdata(d);
3174 struct il_rxon_context *ctx = &il->ctx;
3175
3176 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3177}
3178
3179static ssize_t
3180il3945_store_flags(struct device *d, struct device_attribute *attr,
3181 const char *buf, size_t count)
3182{
3183 struct il_priv *il = dev_get_drvdata(d);
3184 u32 flags = simple_strtoul(buf, NULL, 0);
3185 struct il_rxon_context *ctx = &il->ctx;
3186
3187 mutex_lock(&il->mutex);
3188 if (le32_to_cpu(ctx->staging.flags) != flags) {
3189 /* Cancel any currently running scans... */
3190 if (il_scan_cancel_timeout(il, 100))
3191 IL_WARN("Could not cancel scan.\n");
3192 else {
3193 D_INFO("Committing rxon.flags = 0x%04X\n", flags);
3194 ctx->staging.flags = cpu_to_le32(flags);
3195 il3945_commit_rxon(il, ctx);
3196 }
3197 }
3198 mutex_unlock(&il->mutex);
3199
3200 return count;
3201}
3202
3203static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags,
3204 il3945_store_flags);
3205
/* sysfs show: RXON filter flags currently active in the device. */
static ssize_t
il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct il_priv *il = dev_get_drvdata(d);
	struct il_rxon_context *ctx = &il->ctx;

	return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags));
}

/*
 * sysfs store: replace the staged RXON filter flags and commit them.
 * RXON cannot be committed while a scan is in progress, so a running
 * scan is cancelled first; if the cancel times out, the change is
 * silently dropped (only a warning is logged) and count is returned.
 */
static ssize_t
il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct il_priv *il = dev_get_drvdata(d);
	struct il_rxon_context *ctx = &il->ctx;
	u32 filter_flags = simple_strtoul(buf, NULL, 0);

	mutex_lock(&il->mutex);
	if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
		/* Cancel any currently running scans... */
		if (il_scan_cancel_timeout(il, 100))
			IL_WARN("Could not cancel scan.\n");
		else {
			D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
			       filter_flags);
			ctx->staging.filter_flags = cpu_to_le32(filter_flags);
			/* Pushes staging -> active in the device. */
			il3945_commit_rxon(il, ctx);
		}
	}
	mutex_unlock(&il->mutex);

	return count;
}

static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags,
		   il3945_store_filter_flags);
3243
3244static ssize_t
3245il3945_show_measurement(struct device *d, struct device_attribute *attr,
3246 char *buf)
3247{
3248 struct il_priv *il = dev_get_drvdata(d);
3249 struct il_spectrum_notification measure_report;
3250 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3251 u8 *data = (u8 *) &measure_report;
3252 unsigned long flags;
3253
3254 spin_lock_irqsave(&il->lock, flags);
3255 if (!(il->measurement_status & MEASUREMENT_READY)) {
3256 spin_unlock_irqrestore(&il->lock, flags);
3257 return 0;
3258 }
3259 memcpy(&measure_report, &il->measure_report, size);
3260 il->measurement_status = 0;
3261 spin_unlock_irqrestore(&il->lock, flags);
3262
3263 while (size && PAGE_SIZE - len) {
3264 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3265 PAGE_SIZE - len, 1);
3266 len = strlen(buf);
3267 if (PAGE_SIZE - len)
3268 buf[len++] = '\n';
3269
3270 ofs += 16;
3271 size -= min(size, 16U);
3272 }
3273
3274 return len;
3275}
3276
3277static ssize_t
3278il3945_store_measurement(struct device *d, struct device_attribute *attr,
3279 const char *buf, size_t count)
3280{
3281 struct il_priv *il = dev_get_drvdata(d);
3282 struct il_rxon_context *ctx = &il->ctx;
3283 struct ieee80211_measurement_params params = {
3284 .channel = le16_to_cpu(ctx->active.channel),
3285 .start_time = cpu_to_le64(il->_3945.last_tsf),
3286 .duration = cpu_to_le16(1),
3287 };
3288 u8 type = IL_MEASURE_BASIC;
3289 u8 buffer[32];
3290 u8 channel;
3291
3292 if (count) {
3293 char *p = buffer;
3294 strncpy(buffer, buf, min(sizeof(buffer), count));
3295 channel = simple_strtoul(p, NULL, 0);
3296 if (channel)
3297 params.channel = channel;
3298
3299 p = buffer;
3300 while (*p && *p != ' ')
3301 p++;
3302 if (*p)
3303 type = simple_strtoul(p + 1, NULL, 0);
3304 }
3305
3306 D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n",
3307 type, params.channel, buf);
3308 il3945_get_measurement(il, &params, type);
3309
3310 return count;
3311}
3312
3313static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement,
3314 il3945_store_measurement);
3315
3316static ssize_t
3317il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
3318 const char *buf, size_t count)
3319{
3320 struct il_priv *il = dev_get_drvdata(d);
3321
3322 il->retry_rate = simple_strtoul(buf, NULL, 0);
3323 if (il->retry_rate <= 0)
3324 il->retry_rate = 1;
3325
3326 return count;
3327}
3328
3329static ssize_t
3330il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
3331 char *buf)
3332{
3333 struct il_priv *il = dev_get_drvdata(d);
3334 return sprintf(buf, "%d", il->retry_rate);
3335}
3336
3337static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate,
3338 il3945_store_retry_rate);
3339
3340static ssize_t
3341il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
3342{
3343 /* all this shit doesn't belong into sysfs anyway */
3344 return 0;
3345}
3346
3347static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL);
3348
3349static ssize_t
3350il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
3351{
3352 struct il_priv *il = dev_get_drvdata(d);
3353
3354 if (!il_is_alive(il))
3355 return -EAGAIN;
3356
3357 return sprintf(buf, "%d\n", il3945_mod_params.antenna);
3358}
3359
3360static ssize_t
3361il3945_store_antenna(struct device *d, struct device_attribute *attr,
3362 const char *buf, size_t count)
3363{
3364 struct il_priv *il __maybe_unused = dev_get_drvdata(d);
3365 int ant;
3366
3367 if (count == 0)
3368 return 0;
3369
3370 if (sscanf(buf, "%1i", &ant) != 1) {
3371 D_INFO("not in hex or decimal form.\n");
3372 return count;
3373 }
3374
3375 if (ant >= 0 && ant <= 2) {
3376 D_INFO("Setting antenna select to %d.\n", ant);
3377 il3945_mod_params.antenna = (enum il3945_antenna)ant;
3378 } else
3379 D_INFO("Bad antenna select value %d.\n", ant);
3380
3381 return count;
3382}
3383
3384static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna,
3385 il3945_store_antenna);
3386
3387static ssize_t
3388il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
3389{
3390 struct il_priv *il = dev_get_drvdata(d);
3391 if (!il_is_alive(il))
3392 return -EAGAIN;
3393 return sprintf(buf, "0x%08x\n", (int)il->status);
3394}
3395
3396static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL);
3397
3398static ssize_t
3399il3945_dump_error_log(struct device *d, struct device_attribute *attr,
3400 const char *buf, size_t count)
3401{
3402 struct il_priv *il = dev_get_drvdata(d);
3403 char *p = (char *)buf;
3404
3405 if (p[0] == '1')
3406 il3945_dump_nic_error_log(il);
3407
3408 return strnlen(buf, count);
3409}
3410
3411static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log);
3412
3413/*****************************************************************************
3414 *
3415 * driver setup and tear down
3416 *
3417 *****************************************************************************/
3418
/*
 * Create the driver workqueue and initialize all deferred work items,
 * the watchdog timer and the interrupt tasklet.  Called once from
 * probe, before interrupts are enabled.
 *
 * NOTE(review): create_singlethread_workqueue() can return NULL on
 * allocation failure and the result is not checked here — confirm that
 * all queue_work()/queue_delayed_work() callers tolerate a NULL
 * il->workqueue.
 */
static void
il3945_setup_deferred_work(struct il_priv *il)
{
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il3945_bg_restart);
	INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
	INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
	INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);

	il_setup_scan_deferred_work(il);

	il3945_hw_setup_deferred_work(il);

	/* Watchdog is only initialized here; it is armed elsewhere. */
	init_timer(&il->watchdog);
	il->watchdog.data = (unsigned long)il;
	il->watchdog.function = il_bg_watchdog;

	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il3945_irq_tasklet,
		     (unsigned long)il);
}
3444
/*
 * Cancel the deferred work scheduled by il3945_setup_deferred_work().
 * Note that only init_alive_start is cancelled synchronously;
 * alive_start may still be executing when this returns.
 */
static void
il3945_cancel_deferred_work(struct il_priv *il)
{
	il3945_hw_cancel_deferred_work(il);

	cancel_delayed_work_sync(&il->init_alive_start);
	cancel_delayed_work(&il->alive_start);

	il_cancel_scan_deferred_work(il);
}
3455
/* Per-device sysfs attributes; handlers are defined above. */
static struct attribute *il3945_sysfs_entries[] = {
	&dev_attr_antenna.attr,
	&dev_attr_channels.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_flags.attr,
	&dev_attr_filter_flags.attr,
	&dev_attr_measurement.attr,
	&dev_attr_retry_rate.attr,
	&dev_attr_status.attr,
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};

/* Group registered on the PCI device's kobject at probe time. */
static struct attribute_group il3945_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il3945_sysfs_entries,
};
3477
/*
 * mac80211 callbacks for 3945 devices.  The il_mac_* entries are the
 * shared iwlegacy implementations; il3945_* are device-specific.
 * Deliberately not const: probe clears .hw_scan when hardware scanning
 * is disabled via the disable_hw_scan module parameter.
 */
struct ieee80211_ops il3945_hw_ops = {
	.tx = il3945_mac_tx,
	.start = il3945_mac_start,
	.stop = il3945_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il3945_configure_filter,
	.set_key = il3945_mac_set_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il3945_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.tx_last_beacon = il_mac_tx_last_beacon,
};
3496
/*
 * One-time driver-state initialization: locks, lists, mode defaults,
 * EEPROM version check, regulatory channel map, per-channel tx power
 * and geo/band structures.
 *
 * Returns 0 on success or a negative errno; on failure anything
 * allocated here has been freed again via the goto ladder.
 */
static int
il3945_init_drv(struct il_priv *il)
{
	int ret;
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;

	il->retry_rate = 1;
	il->beacon_skb = NULL;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = IEEE80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Refuse EEPROM images older than the 3945 baseline. */
	if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
		IL_WARN("Unsupported EEPROM version: 0x%04X\n",
			eeprom->version);
		ret = -EINVAL;
		goto err;
	}
	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	/* Set up txpower settings in driver for all channels */
	if (il3945_txpower_set_from_eeprom(il)) {
		ret = -EIO;
		goto err_free_channel_map;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il3945_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
3555
/* Max probe-request frame length; sizes the scan IE budget below. */
#define IL3945_MAX_PROBE_REQUEST 200

/*
 * Describe the device's capabilities to mac80211 (rate control
 * algorithm, flags, bands, queue count) and register the hardware.
 * Returns 0 on success or the ieee80211_register_hw() error.
 */
static int
il3945_setup_mac(struct il_priv *il)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-3945-rs";
	hw->sta_data_size = sizeof(struct il3945_sta_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;

	hw->wiphy->interface_modes = il->ctx.interface_modes;

	hw->wiphy->flags |=
	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
	    WIPHY_FLAG_IBSS_RSN;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	/* Only advertise bands that actually have channels. */
	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		    &il->bands[IEEE80211_BAND_2GHZ];

	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		    &il->bands[IEEE80211_BAND_5GHZ];

	il_leds_init(il);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
3603
3604static int
3605il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3606{
3607 int err = 0;
3608 struct il_priv *il;
3609 struct ieee80211_hw *hw;
3610 struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
3611 struct il3945_eeprom *eeprom;
3612 unsigned long flags;
3613
3614 /***********************
3615 * 1. Allocating HW data
3616 * ********************/
3617
3618 /* mac80211 allocates memory for this device instance, including
3619 * space for this driver's ilate structure */
3620 hw = il_alloc_all(cfg);
3621 if (hw == NULL) {
3622 pr_err("Can not allocate network device\n");
3623 err = -ENOMEM;
3624 goto out;
3625 }
3626 il = hw->priv;
3627 SET_IEEE80211_DEV(hw, &pdev->dev);
3628
3629 il->cmd_queue = IL39_CMD_QUEUE_NUM;
3630
3631 il->ctx.ctxid = 0;
3632
3633 il->ctx.rxon_cmd = C_RXON;
3634 il->ctx.rxon_timing_cmd = C_RXON_TIMING;
3635 il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
3636 il->ctx.qos_cmd = C_QOS_PARAM;
3637 il->ctx.ap_sta_id = IL_AP_ID;
3638 il->ctx.wep_key_cmd = C_WEPKEY;
3639 il->ctx.interface_modes =
3640 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
3641 il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
3642 il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
3643 il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
3644
3645 /*
3646 * Disabling hardware scan means that mac80211 will perform scans
3647 * "the hard way", rather than using device's scan.
3648 */
3649 if (il3945_mod_params.disable_hw_scan) {
3650 D_INFO("Disabling hw_scan\n");
3651 il3945_hw_ops.hw_scan = NULL;
3652 }
3653
3654 D_INFO("*** LOAD DRIVER ***\n");
3655 il->cfg = cfg;
3656 il->pci_dev = pdev;
3657 il->inta_mask = CSR_INI_SET_MASK;
3658
3659 if (il_alloc_traffic_mem(il))
3660 IL_ERR("Not enough memory to generate traffic log\n");
3661
3662 /***************************
3663 * 2. Initializing PCI bus
3664 * *************************/
3665 pci_disable_link_state(pdev,
3666 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3667 PCIE_LINK_STATE_CLKPM);
3668
3669 if (pci_enable_device(pdev)) {
3670 err = -ENODEV;
3671 goto out_ieee80211_free_hw;
3672 }
3673
3674 pci_set_master(pdev);
3675
3676 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3677 if (!err)
3678 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3679 if (err) {
3680 IL_WARN("No suitable DMA available.\n");
3681 goto out_pci_disable_device;
3682 }
3683
3684 pci_set_drvdata(pdev, il);
3685 err = pci_request_regions(pdev, DRV_NAME);
3686 if (err)
3687 goto out_pci_disable_device;
3688
3689 /***********************
3690 * 3. Read REV Register
3691 * ********************/
3692 il->hw_base = pci_iomap(pdev, 0, 0);
3693 if (!il->hw_base) {
3694 err = -ENODEV;
3695 goto out_pci_release_regions;
3696 }
3697
3698 D_INFO("pci_resource_len = 0x%08llx\n",
3699 (unsigned long long)pci_resource_len(pdev, 0));
3700 D_INFO("pci_resource_base = %p\n", il->hw_base);
3701
3702 /* We disable the RETRY_TIMEOUT register (0x41) to keep
3703 * PCI Tx retries from interfering with C3 CPU state */
3704 pci_write_config_byte(pdev, 0x41, 0x00);
3705
3706 /* these spin locks will be used in apm_ops.init and EEPROM access
3707 * we should init now
3708 */
3709 spin_lock_init(&il->reg_lock);
3710 spin_lock_init(&il->lock);
3711
3712 /*
3713 * stop and reset the on-board processor just in case it is in a
3714 * strange state ... like being left stranded by a primary kernel
3715 * and this is now the kdump kernel trying to start up
3716 */
3717 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3718
3719 /***********************
3720 * 4. Read EEPROM
3721 * ********************/
3722
3723 /* Read the EEPROM */
3724 err = il_eeprom_init(il);
3725 if (err) {
3726 IL_ERR("Unable to init EEPROM\n");
3727 goto out_iounmap;
3728 }
3729 /* MAC Address location in EEPROM same for 3945/4965 */
3730 eeprom = (struct il3945_eeprom *)il->eeprom;
3731 D_INFO("MAC address: %pM\n", eeprom->mac_address);
3732 SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);
3733
3734 /***********************
3735 * 5. Setup HW Constants
3736 * ********************/
3737 /* Device-specific setup */
3738 if (il3945_hw_set_hw_params(il)) {
3739 IL_ERR("failed to set hw settings\n");
3740 goto out_eeprom_free;
3741 }
3742
3743 /***********************
3744 * 6. Setup il
3745 * ********************/
3746
3747 err = il3945_init_drv(il);
3748 if (err) {
3749 IL_ERR("initializing driver failed\n");
3750 goto out_unset_hw_params;
3751 }
3752
3753 IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);
3754
3755 /***********************
3756 * 7. Setup Services
3757 * ********************/
3758
3759 spin_lock_irqsave(&il->lock, flags);
3760 il_disable_interrupts(il);
3761 spin_unlock_irqrestore(&il->lock, flags);
3762
3763 pci_enable_msi(il->pci_dev);
3764
3765 err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
3766 if (err) {
3767 IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
3768 goto out_disable_msi;
3769 }
3770
3771 err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
3772 if (err) {
3773 IL_ERR("failed to create sysfs device attributes\n");
3774 goto out_release_irq;
3775 }
3776
3777 il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5],
3778 &il->ctx);
3779 il3945_setup_deferred_work(il);
3780 il3945_setup_handlers(il);
3781 il_power_initialize(il);
3782
3783 /*********************************
3784 * 8. Setup and Register mac80211
3785 * *******************************/
3786
3787 il_enable_interrupts(il);
3788
3789 err = il3945_setup_mac(il);
3790 if (err)
3791 goto out_remove_sysfs;
3792
3793 err = il_dbgfs_register(il, DRV_NAME);
3794 if (err)
3795 IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
3796 err);
3797
3798 /* Start monitoring the killswitch */
3799 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);
3800
3801 return 0;
3802
3803out_remove_sysfs:
3804 destroy_workqueue(il->workqueue);
3805 il->workqueue = NULL;
3806 sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
3807out_release_irq:
3808 free_irq(il->pci_dev->irq, il);
3809out_disable_msi:
3810 pci_disable_msi(il->pci_dev);
3811 il_free_geos(il);
3812 il_free_channel_map(il);
3813out_unset_hw_params:
3814 il3945_unset_hw_params(il);
3815out_eeprom_free:
3816 il_eeprom_free(il);
3817out_iounmap:
3818 pci_iounmap(pdev, il->hw_base);
3819out_pci_release_regions:
3820 pci_release_regions(pdev);
3821out_pci_disable_device:
3822 pci_set_drvdata(pdev, NULL);
3823 pci_disable_device(pdev);
3824out_ieee80211_free_hw:
3825 il_free_traffic_mem(il);
3826 ieee80211_free_hw(il->hw);
3827out:
3828 return err;
3829}
3830
/*
 * PCI remove: tear down in (roughly) reverse probe order.  mac80211 is
 * unregistered first — that path runs il3945_mac_stop, which flushes
 * il->workqueue, so the workqueue itself must survive until after the
 * final flush below.
 */
static void __devexit
il3945_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);

	/* Tell all pending and background work that we are going away. */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il3945_down(il);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with il_down(), but there are paths to
	 * run il_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running il_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il3945_synchronize_irq(il);

	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);

	cancel_delayed_work_sync(&il->_3945.rfkill_poll);

	il3945_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il3945_rx_queue_free(il, &il->rxq);
	il3945_hw_txq_ctx_free(il);

	il3945_unset_hw_params(il);

	/*netif_stop_queue(dev); */
	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
	 * il->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	il_free_traffic_mem(il);

	free_irq(pdev->irq, il);
	pci_disable_msi(pdev);

	pci_iounmap(pdev, il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	il_free_channel_map(il);
	il_free_geos(il);
	kfree(il->scan_cmd);
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
3911
3912/*****************************************************************************
3913 *
3914 * driver and module entry point
3915 *
3916 *****************************************************************************/
3917
/* PCI glue: supported device IDs, probe/remove and legacy PM ops. */
static struct pci_driver il3945_driver = {
	.name = DRV_NAME,
	.id_table = il3945_hw_card_ids,
	.probe = il3945_pci_probe,
	.remove = __devexit_p(il3945_pci_remove),
	.driver.pm = IL_LEGACY_PM_OPS,
};
3925
3926static int __init
3927il3945_init(void)
3928{
3929
3930 int ret;
3931 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3932 pr_info(DRV_COPYRIGHT "\n");
3933
3934 ret = il3945_rate_control_register();
3935 if (ret) {
3936 pr_err("Unable to register rate control algorithm: %d\n", ret);
3937 return ret;
3938 }
3939
3940 ret = pci_register_driver(&il3945_driver);
3941 if (ret) {
3942 pr_err("Unable to initialize PCI module\n");
3943 goto error_register;
3944 }
3945
3946 return ret;
3947
3948error_register:
3949 il3945_rate_control_unregister();
3950 return ret;
3951}
3952
/*
 * Module exit: unregister the PCI driver first so all devices detach,
 * then drop the rate-control algorithm they were using.
 */
static void __exit
il3945_exit(void)
{
	pci_unregister_driver(&il3945_driver);
	il3945_rate_control_unregister();
}
3959
MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));

/* Module parameters, visible under /sys/module/<name>/parameters/. */
module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])");
module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
		   S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
#ifdef CONFIG_IWLEGACY_DEBUG
module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");

module_exit(il3945_exit);
module_init(il3945_init);
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
new file mode 100644
index 000000000000..30ad404f8df7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -0,0 +1,995 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/skbuff.h>
30#include <linux/slab.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "commands.h"
40#include "3945.h"
41
42#define RS_NAME "iwl-3945-rs"
43
/*
 * Expected throughput per 3945 rate index.  Zero entries mark rates
 * that do not exist in the given PHY mode.  The _prot variant is
 * presumably the 2.4 GHz table used when protection is required —
 * confirm against the users of expected_tpt in the rest of this file.
 */
static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
	7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
};

static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
	7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
};

static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
	0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
};

static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
	7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
};

/* Maps a minimum RSSI (dBm) to a usable rate index at that signal. */
struct il3945_tpt_entry {
	s8 min_rssi;
	u8 idx;
};

/* Entries sorted from strongest signal (highest rate) to weakest. */
static struct il3945_tpt_entry il3945_tpt_table_a[] = {
	{-60, RATE_54M_IDX},
	{-64, RATE_48M_IDX},
	{-72, RATE_36M_IDX},
	{-80, RATE_24M_IDX},
	{-84, RATE_18M_IDX},
	{-85, RATE_12M_IDX},
	{-87, RATE_9M_IDX},
	{-89, RATE_6M_IDX}
};

static struct il3945_tpt_entry il3945_tpt_table_g[] = {
	{-60, RATE_54M_IDX},
	{-64, RATE_48M_IDX},
	{-68, RATE_36M_IDX},
	{-80, RATE_24M_IDX},
	{-84, RATE_18M_IDX},
	{-85, RATE_12M_IDX},
	{-86, RATE_11M_IDX},
	{-88, RATE_5M_IDX},
	{-90, RATE_2M_IDX},
	{-92, RATE_1M_IDX}
};

/* Sliding-window size and rate-scaling threshold constants. */
#define RATE_MAX_WINDOW 62
#define RATE_FLUSH (3*HZ)
#define RATE_WIN_FLUSH (HZ/2)
#define IL39_RATE_HIGH_TH 11520
#define IL_SUCCESS_UP_TH 8960
#define IL_SUCCESS_DOWN_TH 10880
#define RATE_MIN_FAILURE_TH 6
#define RATE_MIN_SUCCESS_TH 8
#define RATE_DECREASE_TH 1920
#define RATE_RETRY_TH 15
99
100static u8
101il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
102{
103 u32 idx = 0;
104 u32 table_size = 0;
105 struct il3945_tpt_entry *tpt_table = NULL;
106
107 if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
108 rssi = IL_MIN_RSSI_VAL;
109
110 switch (band) {
111 case IEEE80211_BAND_2GHZ:
112 tpt_table = il3945_tpt_table_g;
113 table_size = ARRAY_SIZE(il3945_tpt_table_g);
114 break;
115
116 case IEEE80211_BAND_5GHZ:
117 tpt_table = il3945_tpt_table_a;
118 table_size = ARRAY_SIZE(il3945_tpt_table_a);
119 break;
120
121 default:
122 BUG();
123 break;
124 }
125
126 while (idx < table_size && rssi < tpt_table[idx].min_rssi)
127 idx++;
128
129 idx = min(idx, (table_size - 1));
130
131 return tpt_table[idx].idx;
132}
133
134static void
135il3945_clear_win(struct il3945_rate_scale_data *win)
136{
137 win->data = 0;
138 win->success_counter = 0;
139 win->success_ratio = -1;
140 win->counter = 0;
141 win->average_tpt = IL_INVALID_VALUE;
142 win->stamp = 0;
143}
144
/**
 * il3945_rate_scale_flush_wins - flush out the rate scale wins
 *
 * Returns the number of wins that have gathered data but were
 * not flushed. If there were any that were not flushed, then
 * reschedule the rate flushing routine.
 */
static int
il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
{
	int unflushed = 0;
	int i;
	unsigned long flags;
	struct il_priv *il __maybe_unused = rs_sta->il;

	/*
	 * For each rate, if we have collected data on that rate
	 * and it has been more than RATE_WIN_FLUSH
	 * since we flushed, clear out the gathered stats
	 */
	for (i = 0; i < RATE_COUNT_3945; i++) {
		if (!rs_sta->win[i].counter)
			continue;

		/* Window state is protected by rs_sta->lock, taken per
		 * window; the unlocked counter test above only skips
		 * windows that are obviously empty. */
		spin_lock_irqsave(&rs_sta->lock, flags);
		if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
			D_RATE("flushing %d samples of rate " "idx %d\n",
			       rs_sta->win[i].counter, i);
			il3945_clear_win(&rs_sta->win[i]);
		} else
			unflushed++;
		spin_unlock_irqrestore(&rs_sta->lock, flags);
	}

	return unflushed;
}
181
#define RATE_FLUSH_MAX 5000	/* msec */
#define RATE_FLUSH_MIN 50	/* msec */
#define IL_AVERAGE_PACKETS 1500

/*
 * Timer callback: flush stale rate windows.  While any window still
 * holds unflushed samples, re-arm the timer with a period derived from
 * the observed packet rate, clamped to [RATE_FLUSH_MIN, RATE_FLUSH_MAX]
 * msec; otherwise reset the period and let the timer lapse until new
 * traffic re-arms it.
 */
static void
il3945_bg_rate_scale_flush(unsigned long data)
{
	struct il3945_rs_sta *rs_sta = (void *)data;
	struct il_priv *il __maybe_unused = rs_sta->il;
	int unflushed = 0;
	unsigned long flags;
	u32 packet_count, duration, pps;

	D_RATE("enter\n");

	unflushed = il3945_rate_scale_flush_wins(rs_sta);

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* Number of packets Rx'd since last time this timer ran */
	packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;

	rs_sta->last_tx_packets = rs_sta->tx_packets + 1;

	if (unflushed) {
		duration =
		    jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);

		D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);

		/* Determine packets per second */
		if (duration)
			pps = (packet_count * 1000) / duration;
		else
			pps = 0;

		/* Scale the flush period to roughly IL_AVERAGE_PACKETS
		 * worth of traffic, within the min/max bounds. */
		if (pps) {
			duration = (IL_AVERAGE_PACKETS * 1000) / pps;
			if (duration < RATE_FLUSH_MIN)
				duration = RATE_FLUSH_MIN;
			else if (duration > RATE_FLUSH_MAX)
				duration = RATE_FLUSH_MAX;
		} else
			duration = RATE_FLUSH_MAX;

		rs_sta->flush_time = msecs_to_jiffies(duration);

		D_RATE("new flush period: %d msec ave %d\n", duration,
		       packet_count);

		mod_timer(&rs_sta->rate_scale_flush,
			  jiffies + rs_sta->flush_time);

		rs_sta->last_partial_flush = jiffies;
	} else {
		rs_sta->flush_time = RATE_FLUSH;
		rs_sta->flush_pending = 0;
	}
	/* If there weren't any unflushed entries, we don't schedule the timer
	 * to run again */

	rs_sta->last_flush = jiffies;

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	D_RATE("leave\n");
}
249
/**
 * il3945_collect_tx_data - Update the success/failure sliding win
 *
 * We keep a sliding win of the last 64 packets transmitted
 * at this rate. win->data contains the bitmask of successful
 * packets.
 *
 * @rs_sta:  per-station rate-scale state (supplies the lock)
 * @win:     history win for the rate being updated
 * @success: number of the @retries attempts that were ACKed
 * @retries: total attempts to account for; must be >= 1
 * @idx:     rate idx, used to look up expected throughput
 */
static void
il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
		       struct il3945_rate_scale_data *win, int success,
		       int retries, int idx)
{
	unsigned long flags;
	s32 fail_count;
	struct il_priv *il __maybe_unused = rs_sta->il;

	if (!retries) {
		D_RATE("leave: retries == 0 -- should be at least 1\n");
		return;
	}

	spin_lock_irqsave(&rs_sta->lock, flags);

	/*
	 * Keep track of only the latest 62 tx frame attempts in this rate's
	 * history win; anything older isn't really relevant any more.
	 * If we have filled up the sliding win, drop the oldest attempt;
	 * if the oldest attempt (highest bit in bitmap) shows "success",
	 * subtract "1" from the success counter (this is the main reason
	 * we keep these bitmaps!).
	 * */
	while (retries > 0) {
		if (win->counter >= RATE_MAX_WINDOW) {

			/* remove earliest */
			win->counter = RATE_MAX_WINDOW - 1;

			if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
				win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
				win->success_counter--;
			}
		}

		/* Increment frames-attempted counter */
		win->counter++;

		/* Shift bitmap by one frame (throw away oldest history),
		 * OR in "1", and increment "success" if this
		 * frame was successful. */
		win->data <<= 1;
		if (success > 0) {
			win->success_counter++;
			win->data |= 0x1;
			success--;
		}

		retries--;
	}

	/* Calculate current success ratio, avoid divide-by-0!
	 * Ratio is scaled by 128 to keep precision in integer math. */
	if (win->counter > 0)
		win->success_ratio =
		    128 * (100 * win->success_counter) / win->counter;
	else
		win->success_ratio = IL_INVALID_VALUE;

	fail_count = win->counter - win->success_counter;

	/* Calculate average throughput, if we have enough history.
	 * expected_tpt[idx] is pre-scaled; the +64 rounds the /128. */
	if (fail_count >= RATE_MIN_FAILURE_TH ||
	    win->success_counter >= RATE_MIN_SUCCESS_TH)
		win->average_tpt =
		    ((win->success_ratio * rs_sta->expected_tpt[idx] +
		      64) / 128);
	else
		win->average_tpt = IL_INVALID_VALUE;

	/* Tag this win as having been updated */
	win->stamp = jiffies;

	spin_unlock_irqrestore(&rs_sta->lock, flags);

}
333
334/*
335 * Called after adding a new station to initialize rate scaling
336 */
/*
 * Called after adding a new station to initialize rate scaling
 *
 * Seeds the per-station rate-scale state: expected-throughput table
 * (defaults to 802.11b until il3945_rate_scale_init() refines it),
 * flush timer plumbing, cleared history wins, and a starting Tx rate
 * taken from the highest supported bitrate.
 */
void
il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hw *hw = il->hw;
	struct ieee80211_conf *conf = &il->hw->conf;
	struct il3945_sta_priv *psta;
	struct il3945_rs_sta *rs_sta;
	struct ieee80211_supported_band *sband;
	int i;

	D_INFO("enter\n");
	/* Broadcast station carries no rate-scale state; just clear the
	 * in-progress flag below. */
	if (sta_id == il->ctx.bcast_sta_id)
		goto out;

	psta = (struct il3945_sta_priv *)sta->drv_priv;
	rs_sta = &psta->rs_sta;
	sband = hw->wiphy->bands[conf->channel->band];

	rs_sta->il = il;

	rs_sta->start_rate = RATE_INVALID;

	/* default to just 802.11b */
	rs_sta->expected_tpt = il3945_expected_tpt_b;

	rs_sta->last_partial_flush = jiffies;
	rs_sta->last_flush = jiffies;
	rs_sta->flush_time = RATE_FLUSH;
	rs_sta->last_tx_packets = 0;

	/* Timer itself was init_timer()'d in il3945_rs_alloc_sta(). */
	rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
	rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;

	for (i = 0; i < RATE_COUNT_3945; i++)
		il3945_clear_win(&rs_sta->win[i]);

	/* TODO: what is a good starting rate for STA? About middle? Maybe not
	 * the lowest or the highest rate.. Could consider using RSSI from
	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
	 * after assoc.. */

	/* Pick the highest rate the peer supports as the initial Tx rate. */
	for (i = sband->n_bitrates - 1; i >= 0; i--) {
		if (sta->supp_rates[sband->band] & (1 << i)) {
			rs_sta->last_txrate_idx = i;
			break;
		}
	}

	il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
	/* For 5 GHz band it start at IL_FIRST_OFDM_RATE */
	if (sband->band == IEEE80211_BAND_5GHZ) {
		rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
		il->_3945.sta_supp_rates =
		    il->_3945.sta_supp_rates << IL_FIRST_OFDM_RATE;
	}

out:
	il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;

	D_INFO("leave\n");
}
398
399static void *
400il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
401{
402 return hw->priv;
403}
404
/* mac80211's rate-scale framework requires a free hook even though this
 * driver allocates nothing in il3945_rs_alloc() — intentionally empty. */
static void
il3945_rs_free(void *il)
{
}
411
/*
 * Per-station constructor for the rate-scale framework.
 *
 * Storage lives inside the station's drv_priv, so nothing is allocated
 * here; only the lock and the flush timer are initialized. The timer's
 * function/data are filled in later by il3945_rs_rate_init().
 */
static void *
il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
{
	struct il3945_rs_sta *rs_sta;
	struct il3945_sta_priv *psta = (void *)sta->drv_priv;
	struct il_priv *il __maybe_unused = il_priv;

	D_RATE("enter\n");

	rs_sta = &psta->rs_sta;

	spin_lock_init(&rs_sta->lock);
	init_timer(&rs_sta->rate_scale_flush);

	D_RATE("leave\n");

	return rs_sta;
}
430
/*
 * Per-station destructor: stop the flush timer (waiting for a running
 * callback to finish) before the station state goes away.
 */
static void
il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
{
	struct il3945_rs_sta *rs_sta = il_sta;

	/*
	 * Be careful not to use any members of il3945_rs_sta (like trying
	 * to use il_priv to print out debugging) since it may not be fully
	 * initialized at this point.
	 */
	del_timer_sync(&rs_sta->rate_scale_flush);
}
443
444/**
445 * il3945_rs_tx_status - Update rate control values based on Tx results
446 *
447 * NOTE: Uses il_priv->retry_rate for the # of retries attempted by
448 * the hardware for each rate.
449 */
450static void
451il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
452 struct ieee80211_sta *sta, void *il_sta,
453 struct sk_buff *skb)
454{
455 s8 retries = 0, current_count;
456 int scale_rate_idx, first_idx, last_idx;
457 unsigned long flags;
458 struct il_priv *il = (struct il_priv *)il_rate;
459 struct il3945_rs_sta *rs_sta = il_sta;
460 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
461
462 D_RATE("enter\n");
463
464 retries = info->status.rates[0].count;
465 /* Sanity Check for retries */
466 if (retries > RATE_RETRY_TH)
467 retries = RATE_RETRY_TH;
468
469 first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
470 if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
471 D_RATE("leave: Rate out of bounds: %d\n", first_idx);
472 return;
473 }
474
475 if (!il_sta) {
476 D_RATE("leave: No STA il data to update!\n");
477 return;
478 }
479
480 /* Treat uninitialized rate scaling data same as non-existing. */
481 if (!rs_sta->il) {
482 D_RATE("leave: STA il data uninitialized!\n");
483 return;
484 }
485
486 rs_sta->tx_packets++;
487
488 scale_rate_idx = first_idx;
489 last_idx = first_idx;
490
491 /*
492 * Update the win for each rate. We determine which rates
493 * were Tx'd based on the total number of retries vs. the number
494 * of retries configured for each rate -- currently set to the
495 * il value 'retry_rate' vs. rate specific
496 *
497 * On exit from this while loop last_idx indicates the rate
498 * at which the frame was finally transmitted (or failed if no
499 * ACK)
500 */
501 while (retries > 1) {
502 if ((retries - 1) < il->retry_rate) {
503 current_count = (retries - 1);
504 last_idx = scale_rate_idx;
505 } else {
506 current_count = il->retry_rate;
507 last_idx = il3945_rs_next_rate(il, scale_rate_idx);
508 }
509
510 /* Update this rate accounting for as many retries
511 * as was used for it (per current_count) */
512 il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
513 current_count, scale_rate_idx);
514 D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
515 current_count);
516
517 retries -= current_count;
518
519 scale_rate_idx = last_idx;
520 }
521
522 /* Update the last idx win with success/failure based on ACK */
523 D_RATE("Update rate %d with %s.\n", last_idx,
524 (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
525 il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
526 info->flags & IEEE80211_TX_STAT_ACK, 1,
527 last_idx);
528
529 /* We updated the rate scale win -- if its been more than
530 * flush_time since the last run, schedule the flush
531 * again */
532 spin_lock_irqsave(&rs_sta->lock, flags);
533
534 if (!rs_sta->flush_pending &&
535 time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
536
537 rs_sta->last_partial_flush = jiffies;
538 rs_sta->flush_pending = 1;
539 mod_timer(&rs_sta->rate_scale_flush,
540 jiffies + rs_sta->flush_time);
541 }
542
543 spin_unlock_irqrestore(&rs_sta->lock, flags);
544
545 D_RATE("leave\n");
546}
547
548static u16
549il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
550 enum ieee80211_band band)
551{
552 u8 high = RATE_INVALID;
553 u8 low = RATE_INVALID;
554 struct il_priv *il __maybe_unused = rs_sta->il;
555
556 /* 802.11A walks to the next literal adjacent rate in
557 * the rate table */
558 if (unlikely(band == IEEE80211_BAND_5GHZ)) {
559 int i;
560 u32 mask;
561
562 /* Find the previous rate that is in the rate mask */
563 i = idx - 1;
564 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
565 if (rate_mask & mask) {
566 low = i;
567 break;
568 }
569 }
570
571 /* Find the next rate that is in the rate mask */
572 i = idx + 1;
573 for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
574 if (rate_mask & mask) {
575 high = i;
576 break;
577 }
578 }
579
580 return (high << 8) | low;
581 }
582
583 low = idx;
584 while (low != RATE_INVALID) {
585 if (rs_sta->tgg)
586 low = il3945_rates[low].prev_rs_tgg;
587 else
588 low = il3945_rates[low].prev_rs;
589 if (low == RATE_INVALID)
590 break;
591 if (rate_mask & (1 << low))
592 break;
593 D_RATE("Skipping masked lower rate: %d\n", low);
594 }
595
596 high = idx;
597 while (high != RATE_INVALID) {
598 if (rs_sta->tgg)
599 high = il3945_rates[high].next_rs_tgg;
600 else
601 high = il3945_rates[high].next_rs;
602 if (high == RATE_INVALID)
603 break;
604 if (rate_mask & (1 << high))
605 break;
606 D_RATE("Skipping masked higher rate: %d\n", high);
607 }
608
609 return (high << 8) | low;
610}
611
/**
 * il3945_rs_get_rate - find the rate for the requested packet
 *
 * Returns the ieee80211_rate structure allocated by the driver.
 *
 * The rate control algorithm has no internal mapping between hw_mode's
 * rate ordering and the rate ordering used by the rate control algorithm.
 *
 * The rate control algorithm uses a single table of rates that goes across
 * the entire A/B/G spectrum vs. being limited to just one particular
 * hw_mode.
 *
 * As such, we can't convert the idx obtained below into the hw_mode's
 * rate table and must reference the driver allocated rate table
 *
 */
static void
il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
		   struct ieee80211_tx_rate_control *txrc)
{
	struct ieee80211_supported_band *sband = txrc->sband;
	struct sk_buff *skb = txrc->skb;
	u8 low = RATE_INVALID;
	u8 high = RATE_INVALID;
	u16 high_low;
	int idx;
	struct il3945_rs_sta *rs_sta = il_sta;
	struct il3945_rate_scale_data *win = NULL;
	int current_tpt = IL_INVALID_VALUE;
	int low_tpt = IL_INVALID_VALUE;
	int high_tpt = IL_INVALID_VALUE;
	u32 fail_count;
	s8 scale_action = 0;
	unsigned long flags;
	u16 rate_mask;
	s8 max_rate_idx = -1;
	struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	D_RATE("enter\n");

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (rs_sta && !rs_sta->il) {
		D_RATE("Rate scaling information not initialized yet.\n");
		il_sta = NULL;
	}

	/* NOTE(review): code below dereferences rs_sta unconditionally;
	 * presumably rate_control_send_low() always returns true when
	 * il_sta is NULL — confirm against mac80211. */
	if (rate_control_send_low(sta, il_sta, txrc))
		return;

	rate_mask = sta->supp_rates[sband->band];

	/* get user max rate if set */
	max_rate_idx = txrc->max_rate_idx;
	if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
		max_rate_idx += IL_FIRST_OFDM_RATE;
	if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
		max_rate_idx = -1;

	idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);

	/* 5 GHz rates start at IL_FIRST_OFDM_RATE in the driver table. */
	if (sband->band == IEEE80211_BAND_5GHZ)
		rate_mask = rate_mask << IL_FIRST_OFDM_RATE;

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* for recent assoc, choose best rate regarding
	 * to rssi value
	 */
	if (rs_sta->start_rate != RATE_INVALID) {
		if (rs_sta->start_rate < idx &&
		    (rate_mask & (1 << rs_sta->start_rate)))
			idx = rs_sta->start_rate;
		rs_sta->start_rate = RATE_INVALID;	/* one-shot */
	}

	/* force user max rate if set by user */
	if (max_rate_idx != -1 && max_rate_idx < idx) {
		if (rate_mask & (1 << max_rate_idx))
			idx = max_rate_idx;
	}

	win = &(rs_sta->win[idx]);

	fail_count = win->counter - win->success_counter;

	/* Not enough samples at this rate yet -- keep using it until the
	 * history win fills in. */
	if (fail_count < RATE_MIN_FAILURE_TH &&
	    win->success_counter < RATE_MIN_SUCCESS_TH) {
		spin_unlock_irqrestore(&rs_sta->lock, flags);

		D_RATE("Invalid average_tpt on rate %d: "
		       "counter: %d, success_counter: %d, "
		       "expected_tpt is %sNULL\n", idx, win->counter,
		       win->success_counter,
		       rs_sta->expected_tpt ? "not " : "");

		/* Can't calculate this yet; not enough history */
		win->average_tpt = IL_INVALID_VALUE;
		goto out;

	}

	current_tpt = win->average_tpt;

	high_low =
	    il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
	low = high_low & 0xff;
	high = (high_low >> 8) & 0xff;

	/* If user set max rate, dont allow higher than user constrain */
	if (max_rate_idx != -1 && max_rate_idx < high)
		high = RATE_INVALID;

	/* Collect Measured throughputs of adjacent rates */
	if (low != RATE_INVALID)
		low_tpt = rs_sta->win[low].average_tpt;

	if (high != RATE_INVALID)
		high_tpt = rs_sta->win[high].average_tpt;

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	/* Decide scale direction: -1 = lower rate, +1 = higher, 0 = stay. */
	scale_action = 0;

	/* Low success ratio , need to drop the rate */
	if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
		D_RATE("decrease rate because of low success_ratio\n");
		scale_action = -1;
		/* No throughput measured yet for adjacent rates,
		 * try increase */
	} else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {

		if (high != RATE_INVALID &&
		    win->success_ratio >= RATE_INCREASE_TH)
			scale_action = 1;
		else if (low != RATE_INVALID)
			scale_action = 0;

		/* Both adjacent throughputs are measured, but neither one has
		 * better throughput; we're using the best rate, don't change
		 * it! */
	} else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
		   && low_tpt < current_tpt && high_tpt < current_tpt) {

		D_RATE("No action -- low [%d] & high [%d] < "
		       "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
		scale_action = 0;

		/* At least one of the rates has better throughput */
	} else {
		if (high_tpt != IL_INVALID_VALUE) {

			/* High rate has better throughput, Increase
			 * rate */
			if (high_tpt > current_tpt &&
			    win->success_ratio >= RATE_INCREASE_TH)
				scale_action = 1;
			else {
				D_RATE("decrease rate because of high tpt\n");
				scale_action = 0;
			}
		} else if (low_tpt != IL_INVALID_VALUE) {
			if (low_tpt > current_tpt) {
				D_RATE("decrease rate because of low tpt\n");
				scale_action = -1;
			} else if (win->success_ratio >= RATE_INCREASE_TH) {
				/* Lower rate has better
				 * throughput,decrease rate */
				scale_action = 1;
			}
		}
	}

	/* Sanity check; asked for decrease, but success rate or throughput
	 * has been good at old rate. Don't change it. */
	if (scale_action == -1 && low != RATE_INVALID &&
	    (win->success_ratio > RATE_HIGH_TH ||
	     current_tpt > 100 * rs_sta->expected_tpt[low]))
		scale_action = 0;

	switch (scale_action) {
	case -1:

		/* Decrese rate */
		if (low != RATE_INVALID)
			idx = low;
		break;

	case 1:
		/* Increase rate */
		if (high != RATE_INVALID)
			idx = high;

		break;

	case 0:
	default:
		/* No change */
		break;
	}

	D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
	       low, high);

out:

	/* Report the chosen rate back to mac80211; 5 GHz indexes are
	 * offset by IL_FIRST_OFDM_RATE in the driver-global table. */
	if (sband->band == IEEE80211_BAND_5GHZ) {
		if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
			idx = IL_FIRST_OFDM_RATE;
		rs_sta->last_txrate_idx = idx;
		info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
	} else {
		rs_sta->last_txrate_idx = idx;
		info->control.rates[0].idx = rs_sta->last_txrate_idx;
	}

	D_RATE("leave: %d\n", idx);
}
830
831#ifdef CONFIG_MAC80211_DEBUGFS
832static int
833il3945_open_file_generic(struct inode *inode, struct file *file)
834{
835 file->private_data = inode->i_private;
836 return 0;
837}
838
839static ssize_t
840il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
841 size_t count, loff_t *ppos)
842{
843 char *buff;
844 int desc = 0;
845 int j;
846 ssize_t ret;
847 struct il3945_rs_sta *lq_sta = file->private_data;
848
849 buff = kmalloc(1024, GFP_KERNEL);
850 if (!buff)
851 return -ENOMEM;
852
853 desc +=
854 sprintf(buff + desc,
855 "tx packets=%d last rate idx=%d\n"
856 "rate=0x%X flush time %d\n", lq_sta->tx_packets,
857 lq_sta->last_txrate_idx, lq_sta->start_rate,
858 jiffies_to_msecs(lq_sta->flush_time));
859 for (j = 0; j < RATE_COUNT_3945; j++) {
860 desc +=
861 sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
862 lq_sta->win[j].counter,
863 lq_sta->win[j].success_counter,
864 lq_sta->win[j].success_ratio);
865 }
866 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
867 kfree(buff);
868 return ret;
869}
870
/* File operations for the per-station "rate_stats_table" debugfs node. */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = il3945_sta_dbgfs_stats_table_read,
	.open = il3945_open_file_generic,
	.llseek = default_llseek,
};
876
877static void
878il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
879{
880 struct il3945_rs_sta *lq_sta = il_sta;
881
882 lq_sta->rs_sta_dbgfs_stats_table_file =
883 debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
884 &rs_sta_dbgfs_stats_table_ops);
885
886}
887
888static void
889il3945_remove_debugfs(void *il, void *il_sta)
890{
891 struct il3945_rs_sta *lq_sta = il_sta;
892 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
893}
894#endif
895
/*
 * Initialization of rate scaling information is done by driver after
 * the station is added. Since mac80211 calls this function before a
 * station is added we ignore it.
 *
 * The real initialization is il3945_rs_rate_init(), invoked by the
 * driver once the station actually exists.
 */
static void
il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
			 struct ieee80211_sta *sta, void *il_sta)
{
}
906
/* Hook table registered with mac80211's rate-control framework. */
static struct rate_control_ops rs_ops = {
	.module = NULL,
	.name = RS_NAME,
	.tx_status = il3945_rs_tx_status,
	.get_rate = il3945_rs_get_rate,
	.rate_init = il3945_rs_rate_init_stub,
	.alloc = il3945_rs_alloc,
	.free = il3945_rs_free,
	.alloc_sta = il3945_rs_alloc_sta,
	.free_sta = il3945_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = il3945_add_debugfs,
	.remove_sta_debugfs = il3945_remove_debugfs,
#endif

};
923
/*
 * Refine a station's rate-scale state once association context is known:
 * pick the expected-throughput table for the current band/protection
 * mode and seed start_rate from the last received RSSI.
 *
 * Looks the station up under rcu_read_lock(); bails out silently if it
 * is not found.
 */
void
il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
{
	struct il_priv *il = hw->priv;
	s32 rssi = 0;
	unsigned long flags;
	struct il3945_rs_sta *rs_sta;
	struct ieee80211_sta *sta;
	struct il3945_sta_priv *psta;

	D_RATE("enter\n");

	rcu_read_lock();

	sta =
	    ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
	if (!sta) {
		D_RATE("Unable to find station to initialize rate scaling.\n");
		rcu_read_unlock();
		return;
	}

	psta = (void *)sta->drv_priv;
	rs_sta = &psta->rs_sta;

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* Select expected-throughput table by band and, on 2.4 GHz, by
	 * whether TGG (11g) protection is active. */
	rs_sta->tgg = 0;
	switch (il->band) {
	case IEEE80211_BAND_2GHZ:
		/* TODO: this always does G, not a regression */
		if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
			rs_sta->tgg = 1;
			rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
		} else
			rs_sta->expected_tpt = il3945_expected_tpt_g;
		break;

	case IEEE80211_BAND_5GHZ:
		rs_sta->expected_tpt = il3945_expected_tpt_a;
		break;
	case IEEE80211_NUM_BANDS:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	/* Seed the initial rate from signal strength; fall back to the
	 * minimum valid RSSI when nothing has been received yet. */
	rssi = il->_3945.last_rx_rssi;
	if (rssi == 0)
		rssi = IL_MIN_RSSI_VAL;

	D_RATE("Network RSSI: %d\n", rssi);

	rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);

	D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
	       rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
	rcu_read_unlock();
}
984
/* Register the 3945 rate-scale algorithm with mac80211 (0 on success). */
int
il3945_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_ops);
}
990
/* Unregister the 3945 rate-scale algorithm from mac80211. */
void
il3945_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_ops);
}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
new file mode 100644
index 000000000000..863664f9ba8b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -0,0 +1,2751 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/sched.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/firmware.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40#include <net/mac80211.h>
41
42#include "common.h"
43#include "3945.h"
44
/* Send led command
 *
 * Fires C_LEDS asynchronously (no completion callback); returns the
 * il_send_cmd() status.
 */
static int
il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
{
	struct il_host_cmd cmd = {
		.id = C_LEDS,
		.len = sizeof(struct il_led_cmd),
		.data = led_cmd,
		.flags = CMD_ASYNC,
		.callback = NULL,
	};

	return il_send_cmd(il, &cmd);
}
59
/* 3945-specific LED operations (command-based control). */
const struct il_led_ops il3945_led_ops = {
	.cmd = il3945_send_led_cmd,
};
63
/* Build one il3945_rate_info entry for rate `r`:
 * PLCP code, IEEE rate value, prev/next IEEE rates (ip/in),
 * prev/next rate-scale rates (rp/rn), prev/next TGG rates (pp/np),
 * and table idx values. */
#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
	[RATE_##r##M_IDX] = { RATE_##r##M_PLCP,   \
			      RATE_##r##M_IEEE,   \
			      RATE_##ip##M_IDX,   \
			      RATE_##in##M_IDX,   \
			      RATE_##rp##M_IDX,   \
			      RATE_##rn##M_IDX,   \
			      RATE_##pp##M_IDX,   \
			      RATE_##np##M_IDX,   \
			      RATE_##r##M_IDX_TBL, \
			      RATE_##ip##M_IDX_TBL }

/*
 * Parameter order:
 * rate, prev ieee rate, next ieee rate, prev rate-scale rate,
 * next rate-scale rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to RATE_INVALID
 *
 */
/* Global 3945 rate table: CCK rates first, then OFDM. The prev/next
 * links encode the legal rate-scaling walk order per mode. */
const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
	IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),	/* 1mbps */
	IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),	/* 2mbps */
	IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),	/*5.5mbps */
	IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),	/* 11mbps */
	IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),	/* 6mbps */
	IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),	/* 9mbps */
	IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),	/* 12mbps */
	IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),	/* 18mbps */
	IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),	/* 24mbps */
	IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),	/* 36mbps */
	IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),	/* 48mbps */
	IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),	/* 54mbps */
};
98
99static inline u8
100il3945_get_prev_ieee_rate(u8 rate_idx)
101{
102 u8 rate = il3945_rates[rate_idx].prev_ieee;
103
104 if (rate == RATE_INVALID)
105 rate = rate_idx;
106 return rate;
107}
108
109/* 1 = enable the il3945_disable_events() function */
110#define IL_EVT_DISABLE (0)
111#define IL_EVT_DISABLE_SIZE (1532/32)
112
/**
 * il3945_disable_events - Disable selected events in uCode event log
 *
 * Disable an event by writing "1"s into "disable"
 * bitmap in SRAM. Bit position corresponds to Event # (id/type).
 * Default values of 0 enable uCode events to be logged.
 * Use for only special debugging. This function is just a placeholder as-is,
 * you'll need to provide the special bits! ...
 * ... and set IL_EVT_DISABLE to 1. */
void
il3945_disable_events(struct il_priv *il)
{
	int i;
	u32 base;		/* SRAM address of event log header */
	u32 disable_ptr;	/* SRAM address of event-disable bitmap array */
	u32 array_size;		/* # of u32 entries in array */
	/* All-zero placeholder bitmap: nothing disabled by default.
	 * One u32 per 32 event ids, covering ids 0..1503. */
	static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
		0x00000000,	/* 31 - 0 Event id numbers */
		0x00000000,	/* 63 - 32 */
		0x00000000,	/* 95 - 64 */
		0x00000000,	/* 127 - 96 */
		0x00000000,	/* 159 - 128 */
		0x00000000,	/* 191 - 160 */
		0x00000000,	/* 223 - 192 */
		0x00000000,	/* 255 - 224 */
		0x00000000,	/* 287 - 256 */
		0x00000000,	/* 319 - 288 */
		0x00000000,	/* 351 - 320 */
		0x00000000,	/* 383 - 352 */
		0x00000000,	/* 415 - 384 */
		0x00000000,	/* 447 - 416 */
		0x00000000,	/* 479 - 448 */
		0x00000000,	/* 511 - 480 */
		0x00000000,	/* 543 - 512 */
		0x00000000,	/* 575 - 544 */
		0x00000000,	/* 607 - 576 */
		0x00000000,	/* 639 - 608 */
		0x00000000,	/* 671 - 640 */
		0x00000000,	/* 703 - 672 */
		0x00000000,	/* 735 - 704 */
		0x00000000,	/* 767 - 736 */
		0x00000000,	/* 799 - 768 */
		0x00000000,	/* 831 - 800 */
		0x00000000,	/* 863 - 832 */
		0x00000000,	/* 895 - 864 */
		0x00000000,	/* 927 - 896 */
		0x00000000,	/* 959 - 928 */
		0x00000000,	/* 991 - 960 */
		0x00000000,	/* 1023 - 992 */
		0x00000000,	/* 1055 - 1024 */
		0x00000000,	/* 1087 - 1056 */
		0x00000000,	/* 1119 - 1088 */
		0x00000000,	/* 1151 - 1120 */
		0x00000000,	/* 1183 - 1152 */
		0x00000000,	/* 1215 - 1184 */
		0x00000000,	/* 1247 - 1216 */
		0x00000000,	/* 1279 - 1248 */
		0x00000000,	/* 1311 - 1280 */
		0x00000000,	/* 1343 - 1312 */
		0x00000000,	/* 1375 - 1344 */
		0x00000000,	/* 1407 - 1376 */
		0x00000000,	/* 1439 - 1408 */
		0x00000000,	/* 1471 - 1440 */
		0x00000000,	/* 1503 - 1472 */
	};

	/* Sanity-check the SRAM event log pointer from the ALIVE response. */
	base = le32_to_cpu(il->card_alive.log_event_table_ptr);
	if (!il3945_hw_valid_rtc_data_addr(base)) {
		IL_ERR("Invalid event log pointer 0x%08X\n", base);
		return;
	}

	disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
	array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));

	/* Only write when explicitly enabled at compile time AND the
	 * device-reported array size matches our table. */
	if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
		D_INFO("Disabling selected uCode log events at 0x%x\n",
		       disable_ptr);
		for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
			il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
					  evt_disable[i]);

	} else {
		D_INFO("Selected uCode log events may be disabled\n");
		D_INFO("  by writing \"1\"s into disable bitmap\n");
		D_INFO("  in SRAM at 0x%x, size %d u32s\n", disable_ptr,
		       array_size);
	}

}
203
204static int
205il3945_hwrate_to_plcp_idx(u8 plcp)
206{
207 int idx;
208
209 for (idx = 0; idx < RATE_COUNT_3945; idx++)
210 if (il3945_rates[idx].plcp == plcp)
211 return idx;
212 return -1;
213}
214
#ifdef CONFIG_IWLEGACY_DEBUG
/* Expand a TX_3945_STATUS_FAIL_* case into `case X: return "X";`. */
#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x

/* Map a hardware Tx status code to a printable failure-reason string.
 * Returns "UNKNOWN" for codes not in the table. */
static const char *
il3945_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_3945_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
#else
/* Non-debug builds: no string table, always return the empty string. */
static inline const char *
il3945_get_tx_fail_reason(u32 status)
{
	return "";
}
#endif
251
/*
 * get ieee prev rate from rate scale table.
 * for A and B mode we need to override the prev
 * value
 */
int
il3945_rs_next_rate(struct il_priv *il, int rate)
{
	/* Default: the previous IEEE rate from the global table. */
	int next_rate = il3945_get_prev_ieee_rate(rate);

	switch (il->band) {
	case IEEE80211_BAND_5GHZ:
		/* 5 GHz has no CCK rates: skip 9M below 12M, and clamp
		 * at 6M (the lowest OFDM rate). */
		if (rate == RATE_12M_IDX)
			next_rate = RATE_9M_IDX;
		else if (rate == RATE_6M_IDX)
			next_rate = RATE_6M_IDX;
		break;
	case IEEE80211_BAND_2GHZ:
		/* Pure 11b peers: fall from 11M straight to 5.5M. */
		if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
		    il_is_associated(il)) {
			if (rate == RATE_11M_IDX)
				next_rate = RATE_5M_IDX;
		}
		break;

	default:
		break;
	}

	return next_rate;
}
283
/**
 * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
 *
 * When FW advances 'R' idx, all entries between old and new 'R' idx
 * need to be reclaimed. As result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void
il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	struct il_tx_info *tx_info;

	/* Command queue entries are reclaimed elsewhere, never here. */
	BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);

	/* Walk read_ptr up to (one past) idx, reporting each skb's Tx
	 * status to mac80211 and freeing its TFD. */
	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
		tx_info->skb = NULL;
		il->cfg->ops->lib->txq_free_tfd(il, txq);
	}

	if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
	    txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
		il_wake_queue(il, txq);
}
313
/**
 * il3945_hdl_tx - Handle Tx response
 *
 * Parses the firmware Tx response: validates the queue idx, fills
 * the skb's tx_info rate/retry/ACK status, and reclaims the queue
 * entries up to the responded idx.
 */
static void
il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->status);
	int rate_idx;
	int fail;

	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
		       "is out of range [0-%d] %d %d\n", txq_id, idx,
		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;
	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
	ieee80211_tx_info_clear_status(info);

	/* Fill the MRR chain with some info about on-chip retransmissions.
	 * NOTE(review): il3945_hwrate_to_plcp_idx() can return -1 for an
	 * unknown PLCP code, which would propagate into rates[0].idx --
	 * presumably firmware only reports valid codes; confirm. */
	rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx -= IL_FIRST_OFDM_RATE;

	fail = tx_resp->failure_frame;

	info->status.rates[0].idx = rate_idx;
	info->status.rates[0].count = fail + 1;	/* add final attempt */

	/* tx_status->rts_retry_count = tx_resp->failure_rts; */
	info->flags |=
	    ((status & TX_STATUS_MSK) ==
	     TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;

	D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
	     il3945_get_tx_fail_reason(status), status, tx_resp->rate,
	     tx_resp->failure_frame);

	D_TX_REPLY("Tx queue reclaim %d\n", idx);
	il3945_tx_queue_reclaim(il, txq_id, idx);

	if (status & TX_ABORT_REQUIRED_MSK)
		IL_ERR("TODO:  Implement Tx ABORT REQUIRED!!!\n");
}
367
368/*****************************************************************************
369 *
370 * Intel PRO/Wireless 3945ABG/BG Network Connection
371 *
372 * RX handler implementations
373 *
374 *****************************************************************************/
375#ifdef CONFIG_IWLEGACY_DEBUGFS
376static void
377il3945_accumulative_stats(struct il_priv *il, __le32 * stats)
378{
379 int i;
380 __le32 *prev_stats;
381 u32 *accum_stats;
382 u32 *delta, *max_delta;
383
384 prev_stats = (__le32 *) &il->_3945.stats;
385 accum_stats = (u32 *) &il->_3945.accum_stats;
386 delta = (u32 *) &il->_3945.delta_stats;
387 max_delta = (u32 *) &il->_3945.max_delta;
388
389 for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
390 i +=
391 sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
392 accum_stats++) {
393 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
394 *delta =
395 (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
396 *accum_stats += *delta;
397 if (*delta > *max_delta)
398 *max_delta = *delta;
399 }
400 }
401
402 /* reset accumulative stats for "no-counter" type stats */
403 il->_3945.accum_stats.general.temperature =
404 il->_3945.stats.general.temperature;
405 il->_3945.accum_stats.general.ttl_timestamp =
406 il->_3945.stats.general.ttl_timestamp;
407}
408#endif
409
410void
411il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
412{
413 struct il_rx_pkt *pkt = rxb_addr(rxb);
414
415 D_RX("Statistics notification received (%d vs %d).\n",
416 (int)sizeof(struct il3945_notif_stats),
417 le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
418#ifdef CONFIG_IWLEGACY_DEBUGFS
419 il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
420#endif
421
422 memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
423}
424
425void
426il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
427{
428 struct il_rx_pkt *pkt = rxb_addr(rxb);
429 __le32 *flag = (__le32 *) &pkt->u.raw;
430
431 if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
432#ifdef CONFIG_IWLEGACY_DEBUGFS
433 memset(&il->_3945.accum_stats, 0,
434 sizeof(struct il3945_notif_stats));
435 memset(&il->_3945.delta_stats, 0,
436 sizeof(struct il3945_notif_stats));
437 memset(&il->_3945.max_delta, 0,
438 sizeof(struct il3945_notif_stats));
439#endif
440 D_RX("Statistics have been cleared\n");
441 }
442 il3945_hdl_stats(il, rxb);
443}
444
445/******************************************************************************
446 *
447 * Misc. internal state and helper functions
448 *
449 ******************************************************************************/
450
451/* This is necessary only for a number of stats, see the caller. */
452static int
453il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
454{
455 /* Filter incoming packets to determine if they are targeted toward
456 * this network, discarding packets coming from ourselves */
457 switch (il->iw_mode) {
458 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
459 /* packets to our IBSS update information */
460 return !compare_ether_addr(header->addr3, il->bssid);
461 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
462 /* packets to our IBSS update information */
463 return !compare_ether_addr(header->addr2, il->bssid);
464 default:
465 return 1;
466 }
467}
468
/*
 * il3945_pass_packet_to_mac80211 - hand one received frame to mac80211
 *
 * Wraps the frame payload (still in the RX page) in an skb and
 * delivers it upstream.  Drops the frame when the claimed length would
 * overrun the receive buffer or when the interface is closed.  On
 * success, ownership of the RX page passes to the skb (rxb->page is
 * cleared so the caller will not recycle it).
 */
static void
il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
	u16 len = le16_to_cpu(rx_hdr->len);
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* Sanity check: payload plus 3945 RX framing must fit inside
	 * the receive page; otherwise the length field is corrupt. */
	if (unlikely
	    (len + IL39_RX_FRAME_SIZE >
	     PAGE_SIZE << il->hw_params.rx_page_order)) {
		D_DROP("Corruption detected!\n");
		return;
	}

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	/* Small skb head only; the payload is attached below as a
	 * zero-copy page fragment. */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	/* With HW crypto, report the decryption result to mac80211. */
	if (!il3945_mod_params.sw_crypto)
		il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
				      le32_to_cpu(rx_end->status), stats);

	/* Attach the payload; offset is relative to the page start. */
	skb_add_rx_frag(skb, 0, rxb->page,
			(void *)rx_hdr->payload - (void *)pkt, len);

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
	/* Page now belongs to the skb; stop accounting for it here. */
	il->alloc_rxb_page--;
	rxb->page = NULL;
}
515
516#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
517
518static void
519il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
520{
521 struct ieee80211_hdr *header;
522 struct ieee80211_rx_status rx_status;
523 struct il_rx_pkt *pkt = rxb_addr(rxb);
524 struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
525 struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
526 struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
527 u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
528 u16 rx_stats_noise_diff __maybe_unused =
529 le16_to_cpu(rx_stats->noise_diff);
530 u8 network_packet;
531
532 rx_status.flag = 0;
533 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
534 rx_status.band =
535 (rx_hdr->
536 phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
537 IEEE80211_BAND_5GHZ;
538 rx_status.freq =
539 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
540 rx_status.band);
541
542 rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
543 if (rx_status.band == IEEE80211_BAND_5GHZ)
544 rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
545
546 rx_status.antenna =
547 (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
548 4;
549
550 /* set the preamble flag if appropriate */
551 if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
552 rx_status.flag |= RX_FLAG_SHORTPRE;
553
554 if ((unlikely(rx_stats->phy_count > 20))) {
555 D_DROP("dsp size out of range [0,20]: %d/n",
556 rx_stats->phy_count);
557 return;
558 }
559
560 if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
561 !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
562 D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
563 return;
564 }
565
566 /* Convert 3945's rssi indicator to dBm */
567 rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;
568
569 D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
570 rx_stats_sig_avg, rx_stats_noise_diff);
571
572 header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
573
574 network_packet = il3945_is_network_packet(il, header);
575
576 D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
577 network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
578 rx_status.signal, rx_status.signal, rx_status.rate_idx);
579
580 il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header);
581
582 if (network_packet) {
583 il->_3945.last_beacon_time =
584 le32_to_cpu(rx_end->beacon_timestamp);
585 il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
586 il->_3945.last_rx_rssi = rx_status.signal;
587 }
588
589 il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
590}
591
592int
593il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
594 dma_addr_t addr, u16 len, u8 reset, u8 pad)
595{
596 int count;
597 struct il_queue *q;
598 struct il3945_tfd *tfd, *tfd_tmp;
599
600 q = &txq->q;
601 tfd_tmp = (struct il3945_tfd *)txq->tfds;
602 tfd = &tfd_tmp[q->write_ptr];
603
604 if (reset)
605 memset(tfd, 0, sizeof(*tfd));
606
607 count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
608
609 if (count >= NUM_TFD_CHUNKS || count < 0) {
610 IL_ERR("Error can not send more than %d chunks\n",
611 NUM_TFD_CHUNKS);
612 return -EINVAL;
613 }
614
615 tfd->tbs[count].addr = cpu_to_le32(addr);
616 tfd->tbs[count].len = cpu_to_le32(len);
617
618 count++;
619
620 tfd->control_flags =
621 cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));
622
623 return 0;
624}
625
626/**
627 * il3945_hw_txq_free_tfd - Free one TFD, those at idx [txq->q.read_ptr]
628 *
629 * Does NOT advance any idxes
630 */
631void
632il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
633{
634 struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
635 int idx = txq->q.read_ptr;
636 struct il3945_tfd *tfd = &tfd_tmp[idx];
637 struct pci_dev *dev = il->pci_dev;
638 int i;
639 int counter;
640
641 /* sanity check */
642 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
643 if (counter > NUM_TFD_CHUNKS) {
644 IL_ERR("Too many chunks: %i\n", counter);
645 /* @todo issue fatal error, it is quite serious situation */
646 return;
647 }
648
649 /* Unmap tx_cmd */
650 if (counter)
651 pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
652 dma_unmap_len(&txq->meta[idx], len),
653 PCI_DMA_TODEVICE);
654
655 /* unmap chunks if any */
656
657 for (i = 1; i < counter; i++)
658 pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
659 le32_to_cpu(tfd->tbs[i].len),
660 PCI_DMA_TODEVICE);
661
662 /* free SKB */
663 if (txq->txb) {
664 struct sk_buff *skb;
665
666 skb = txq->txb[txq->q.read_ptr].skb;
667
668 /* can be called from irqs-disabled context */
669 if (skb) {
670 dev_kfree_skb_any(skb);
671 txq->txb[txq->q.read_ptr].skb = NULL;
672 }
673 }
674}
675
676/**
677 * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
678 *
679*/
680void
681il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
682 struct ieee80211_tx_info *info,
683 struct ieee80211_hdr *hdr, int sta_id, int tx_id)
684{
685 u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
686 u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945);
687 u16 rate_mask;
688 int rate;
689 u8 rts_retry_limit;
690 u8 data_retry_limit;
691 __le32 tx_flags;
692 __le16 fc = hdr->frame_control;
693 struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
694
695 rate = il3945_rates[rate_idx].plcp;
696 tx_flags = tx_cmd->tx_flags;
697
698 /* We need to figure out how to get the sta->supp_rates while
699 * in this running context */
700 rate_mask = RATES_MASK_3945;
701
702 /* Set retry limit on DATA packets and Probe Responses */
703 if (ieee80211_is_probe_resp(fc))
704 data_retry_limit = 3;
705 else
706 data_retry_limit = IL_DEFAULT_TX_RETRY;
707 tx_cmd->data_retry_limit = data_retry_limit;
708
709 if (tx_id >= IL39_CMD_QUEUE_NUM)
710 rts_retry_limit = 3;
711 else
712 rts_retry_limit = 7;
713
714 if (data_retry_limit < rts_retry_limit)
715 rts_retry_limit = data_retry_limit;
716 tx_cmd->rts_retry_limit = rts_retry_limit;
717
718 tx_cmd->rate = rate;
719 tx_cmd->tx_flags = tx_flags;
720
721 /* OFDM */
722 tx_cmd->supp_rates[0] =
723 ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;
724
725 /* CCK */
726 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
727
728 D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
729 "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
730 le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
731 tx_cmd->supp_rates[0]);
732}
733
734static u8
735il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
736{
737 unsigned long flags_spin;
738 struct il_station_entry *station;
739
740 if (sta_id == IL_INVALID_STATION)
741 return IL_INVALID_STATION;
742
743 spin_lock_irqsave(&il->sta_lock, flags_spin);
744 station = &il->stations[sta_id];
745
746 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
747 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
748 station->sta.mode = STA_CONTROL_MODIFY_MSK;
749 il_send_add_sta(il, &station->sta, CMD_ASYNC);
750 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
751
752 D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
753 return sta_id;
754}
755
/*
 * il3945_set_pwr_vmain - select V_MAIN as the NIC power source
 *
 * Programs the APMG power-source field and then polls the GPIO input
 * register until the hardware reports it is running from V_MAIN (or
 * the poll times out).
 */
static void
il3945_set_pwr_vmain(struct il_priv *il)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do

	if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
		il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);

		_il_poll_bit(il, CSR_GPIO_IN,
			     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
			     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
	}
 */

	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);

	/* wait for the switch to take effect (bounded poll) */
	_il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
		     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
}
781
782static int
783il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
784{
785 il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
786 il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
787 il_wr(il, FH39_RCSR_WPTR(0), 0);
788 il_wr(il, FH39_RCSR_CONFIG(0),
789 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
790 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
791 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
792 FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | (RX_QUEUE_SIZE_LOG
793 <<
794 FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE)
795 | FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | (1 <<
796 FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH)
797 | FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
798
799 /* fake read to flush all prev I/O */
800 il_rd(il, FH39_RSSR_CTRL);
801
802 return 0;
803}
804
/*
 * il3945_tx_reset - program the 3945 Tx scheduler and frame handler
 *
 * Puts the scheduler into bypass mode, activates RA 0 and all six
 * FIFOs, sets the scheduler bypass registers, points the frame handler
 * at the shared command-block area, and programs the Tx message
 * configuration.  Always returns 0.
 */
static int
il3945_tx_reset(struct il_priv *il)
{

	/* bypass mode */
	il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);

	/* RA 0 is active */
	il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);

	/* all 6 fifo are active */
	il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);

	il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
	il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
	il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
	il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);

	/* DMA base of the shared command blocks */
	il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);

	il_wr(il, FH39_TSSR_MSG_CONFIG,
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);

	return 0;
}
836
837/**
838 * il3945_txq_ctx_reset - Reset TX queue context
839 *
840 * Destroys all DMA structures and initialize them again
841 */
842static int
843il3945_txq_ctx_reset(struct il_priv *il)
844{
845 int rc;
846 int txq_id, slots_num;
847
848 il3945_hw_txq_ctx_free(il);
849
850 /* allocate tx queue structure */
851 rc = il_alloc_txq_mem(il);
852 if (rc)
853 return rc;
854
855 /* Tx CMD queue */
856 rc = il3945_tx_reset(il);
857 if (rc)
858 goto error;
859
860 /* Tx queue(s) */
861 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
862 slots_num =
863 (txq_id ==
864 IL39_CMD_QUEUE_NUM) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
865 rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
866 if (rc) {
867 IL_ERR("Tx %d queue init failed\n", txq_id);
868 goto error;
869 }
870 }
871
872 return rc;
873
874error:
875 il3945_hw_txq_ctx_free(il);
876 return rc;
877}
878
879/*
880 * Start up 3945's basic functionality after it has been reset
881 * (e.g. after platform boot, or shutdown via il_apm_stop())
882 * NOTE: This does not load uCode nor start the embedded processor
883 */
884static int
885il3945_apm_init(struct il_priv *il)
886{
887 int ret = il_apm_init(il);
888
889 /* Clear APMG (NIC's internal power management) interrupts */
890 il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
891 il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
892
893 /* Reset radio chip */
894 il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
895 udelay(5);
896 il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
897
898 return ret;
899}
900
/*
 * il3945_nic_config - program HW-interface config bits from the EEPROM
 *
 * Reads the PCI revision and EEPROM SKU/board/silicon fields and sets
 * the matching CSR_HW_IF_CONFIG_REG bits under the device lock, then
 * logs whether SW/HW RF-kill support is advertised.
 */
static void
il3945_nic_config(struct il_priv *il)
{
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
	unsigned long flags;
	u8 rev_id = il->pci_dev->revision;

	spin_lock_irqsave(&il->lock, flags);

	/* Determine HW type */
	D_INFO("HW Revision ID = 0x%X\n", rev_id);

	/* Radio type: RTP needs no config bit; MB/MM each set their own */
	if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
		D_INFO("RTP type\n");
	else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
		D_INFO("3945 RADIO-MB type\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
	} else {
		D_INFO("3945 RADIO-MM type\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
	}

	/* SKU operating mode (MRC vs basic) */
	if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
		D_INFO("SKU OP mode is mrc\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
	} else
		D_INFO("SKU OP mode is basic\n");

	/* Board-type bit follows the 0xD0 revision family */
	if ((eeprom->board_revision & 0xF0) == 0xD0) {
		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	} else {
		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
		il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
			     CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	}

	/* Silicon type: M version <= 1 is type A, otherwise type B */
	if (eeprom->almgor_m_version <= 1) {
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
		D_INFO("Card M type A version is 0x%X\n",
		       eeprom->almgor_m_version);
	} else {
		D_INFO("Card M type B version is 0x%X\n",
		       eeprom->almgor_m_version);
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
	}
	spin_unlock_irqrestore(&il->lock, flags);

	if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		D_RF_KILL("SW RF KILL supported in EEPROM.\n");

	if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		D_RF_KILL("HW RF KILL supported in EEPROM.\n");
}
961
962int
963il3945_hw_nic_init(struct il_priv *il)
964{
965 int rc;
966 unsigned long flags;
967 struct il_rx_queue *rxq = &il->rxq;
968
969 spin_lock_irqsave(&il->lock, flags);
970 il->cfg->ops->lib->apm_ops.init(il);
971 spin_unlock_irqrestore(&il->lock, flags);
972
973 il3945_set_pwr_vmain(il);
974
975 il->cfg->ops->lib->apm_ops.config(il);
976
977 /* Allocate the RX queue, or reset if it is already allocated */
978 if (!rxq->bd) {
979 rc = il_rx_queue_alloc(il);
980 if (rc) {
981 IL_ERR("Unable to initialize Rx queue\n");
982 return -ENOMEM;
983 }
984 } else
985 il3945_rx_queue_reset(il, rxq);
986
987 il3945_rx_replenish(il);
988
989 il3945_rx_init(il, rxq);
990
991 /* Look at using this instead:
992 rxq->need_update = 1;
993 il_rx_queue_update_write_ptr(il, rxq);
994 */
995
996 il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
997
998 rc = il3945_txq_ctx_reset(il);
999 if (rc)
1000 return rc;
1001
1002 set_bit(S_INIT, &il->status);
1003
1004 return 0;
1005}
1006
1007/**
1008 * il3945_hw_txq_ctx_free - Free TXQ Context
1009 *
1010 * Destroy all TX DMA queues and structures
1011 */
1012void
1013il3945_hw_txq_ctx_free(struct il_priv *il)
1014{
1015 int txq_id;
1016
1017 /* Tx queues */
1018 if (il->txq)
1019 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1020 if (txq_id == IL39_CMD_QUEUE_NUM)
1021 il_cmd_queue_free(il);
1022 else
1023 il_tx_queue_free(il, txq_id);
1024
1025 /* free tx queue structure */
1026 il_txq_mem(il);
1027}
1028
/*
 * il3945_hw_txq_ctx_stop - halt Tx DMA and release all Tx queues
 *
 * Stops the scheduler, disables each TFD DMA channel and waits
 * (bounded poll) for it to go idle, then frees the Tx queue context.
 */
void
il3945_hw_txq_ctx_stop(struct il_priv *il)
{
	int txq_id;

	/* stop SCD */
	il_wr_prph(il, ALM_SCD_MODE_REG, 0);
	il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);

	/* reset TFD queues */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
		il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
		/* wait for the channel to report idle */
		il_poll_bit(il, FH39_TSSR_TX_STATUS,
			    FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
			    1000);
	}

	il3945_hw_txq_ctx_free(il);
}
1048
1049/**
1050 * il3945_hw_reg_adjust_power_by_temp
1051 * return idx delta into power gain settings table
1052*/
1053static int
1054il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
1055{
1056 return (new_reading - old_reading) * (-11) / 100;
1057}
1058
1059/**
1060 * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
1061 */
1062static inline int
1063il3945_hw_reg_temp_out_of_range(int temperature)
1064{
1065 return (temperature < -260 || temperature > 25) ? 1 : 0;
1066}
1067
/*
 * il3945_hw_get_temperature - read the raw temperature register
 *
 * Returns the raw CSR_UCODE_DRV_GP2 value; callers convert it to
 * human-readable units and sanity-check the range.
 */
int
il3945_hw_get_temperature(struct il_priv *il)
{
	return _il_rd(il, CSR_UCODE_DRV_GP2);
}
1073
1074/**
1075 * il3945_hw_reg_txpower_get_temperature
1076 * get the current temperature by reading from NIC
1077*/
1078static int
1079il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
1080{
1081 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1082 int temperature;
1083
1084 temperature = il3945_hw_get_temperature(il);
1085
1086 /* driver's okay range is -260 to +25.
1087 * human readable okay range is 0 to +285 */
1088 D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);
1089
1090 /* handle insane temp reading */
1091 if (il3945_hw_reg_temp_out_of_range(temperature)) {
1092 IL_ERR("Error bad temperature value %d\n", temperature);
1093
1094 /* if really really hot(?),
1095 * substitute the 3rd band/group's temp measured at factory */
1096 if (il->last_temperature > 100)
1097 temperature = eeprom->groups[2].temperature;
1098 else /* else use most recent "sane" value from driver */
1099 temperature = il->last_temperature;
1100 }
1101
1102 return temperature; /* raw, not "human readable" */
1103}
1104
1105/* Adjust Txpower only if temperature variance is greater than threshold.
1106 *
1107 * Both are lower than older versions' 9 degrees */
1108#define IL_TEMPERATURE_LIMIT_TIMER 6
1109
1110/**
1111 * il3945_is_temp_calib_needed - determines if new calibration is needed
1112 *
1113 * records new temperature in tx_mgr->temperature.
1114 * replaces tx_mgr->last_temperature *only* if calib needed
1115 * (assumes caller will actually do the calibration!). */
1116static int
1117il3945_is_temp_calib_needed(struct il_priv *il)
1118{
1119 int temp_diff;
1120
1121 il->temperature = il3945_hw_reg_txpower_get_temperature(il);
1122 temp_diff = il->temperature - il->last_temperature;
1123
1124 /* get absolute value */
1125 if (temp_diff < 0) {
1126 D_POWER("Getting cooler, delta %d,\n", temp_diff);
1127 temp_diff = -temp_diff;
1128 } else if (temp_diff == 0)
1129 D_POWER("Same temp,\n");
1130 else
1131 D_POWER("Getting warmer, delta %d,\n", temp_diff);
1132
1133 /* if we don't need calibration, *don't* update last_temperature */
1134 if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
1135 D_POWER("Timed thermal calib not needed\n");
1136 return 0;
1137 }
1138
1139 D_POWER("Timed thermal calib needed\n");
1140
1141 /* assume that caller will actually do calib ...
1142 * update the "last temperature" value */
1143 il->last_temperature = il->temperature;
1144 return 1;
1145}
1146
1147#define IL_MAX_GAIN_ENTRIES 78
1148#define IL_CCK_FROM_OFDM_POWER_DIFF -5
1149#define IL_CCK_FROM_OFDM_IDX_DIFF (10)
1150
/* radio and DSP power table, each step is 1/2 dB.
 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain.
 * Indexed [band][idx]: band 0 covers 2.4 GHz, band 1 covers 5.x GHz;
 * idx 0 is the highest power, IL_MAX_GAIN_ENTRIES - 1 the lowest. */
static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
	{
	 {251, 127},		/* 2.4 GHz, highest power */
	 {251, 127},
	 {251, 127},
	 {251, 127},
	 {251, 125},
	 {251, 110},
	 {251, 105},
	 {251, 98},
	 {187, 125},
	 {187, 115},
	 {187, 108},
	 {187, 99},
	 {243, 119},
	 {243, 111},
	 {243, 105},
	 {243, 97},
	 {243, 92},
	 {211, 106},
	 {211, 100},
	 {179, 120},
	 {179, 113},
	 {179, 107},
	 {147, 125},
	 {147, 119},
	 {147, 112},
	 {147, 106},
	 {147, 101},
	 {147, 97},
	 {147, 91},
	 {115, 107},
	 {235, 121},
	 {235, 115},
	 {235, 109},
	 {203, 127},
	 {203, 121},
	 {203, 115},
	 {203, 108},
	 {203, 102},
	 {203, 96},
	 {203, 92},
	 {171, 110},
	 {171, 104},
	 {171, 98},
	 {139, 116},
	 {227, 125},
	 {227, 119},
	 {227, 113},
	 {227, 107},
	 {227, 101},
	 {227, 96},
	 {195, 113},
	 {195, 106},
	 {195, 102},
	 {195, 95},
	 {163, 113},
	 {163, 106},
	 {163, 102},
	 {163, 95},
	 {131, 113},
	 {131, 106},
	 {131, 102},
	 {131, 95},
	 {99, 113},
	 {99, 106},
	 {99, 102},
	 {99, 95},
	 {67, 113},
	 {67, 106},
	 {67, 102},
	 {67, 95},
	 {35, 113},
	 {35, 106},
	 {35, 102},
	 {35, 95},
	 {3, 113},
	 {3, 106},
	 {3, 102},
	 {3, 95}		/* 2.4 GHz, lowest power */
	},
	{
	 {251, 127},		/* 5.x GHz, highest power */
	 {251, 120},
	 {251, 114},
	 {219, 119},
	 {219, 101},
	 {187, 113},
	 {187, 102},
	 {155, 114},
	 {155, 103},
	 {123, 117},
	 {123, 107},
	 {123, 99},
	 {123, 92},
	 {91, 108},
	 {59, 125},
	 {59, 118},
	 {59, 109},
	 {59, 102},
	 {59, 96},
	 {59, 90},
	 {27, 104},
	 {27, 98},
	 {27, 92},
	 {115, 118},
	 {115, 111},
	 {115, 104},
	 {83, 126},
	 {83, 121},
	 {83, 113},
	 {83, 105},
	 {83, 99},
	 {51, 118},
	 {51, 111},
	 {51, 104},
	 {51, 98},
	 {19, 116},
	 {19, 109},
	 {19, 102},
	 {19, 98},
	 {19, 93},
	 {171, 113},
	 {171, 107},
	 {171, 99},
	 {139, 120},
	 {139, 113},
	 {139, 107},
	 {139, 99},
	 {107, 120},
	 {107, 113},
	 {107, 107},
	 {107, 99},
	 {75, 120},
	 {75, 113},
	 {75, 107},
	 {75, 99},
	 {43, 120},
	 {43, 113},
	 {43, 107},
	 {43, 99},
	 {11, 120},
	 {11, 113},
	 {11, 107},
	 {11, 99},
	 {131, 107},
	 {131, 99},
	 {99, 120},
	 {99, 113},
	 {99, 107},
	 {99, 99},
	 {67, 120},
	 {67, 113},
	 {67, 107},
	 {67, 99},
	 {35, 120},
	 {35, 113},
	 {35, 107},
	 {35, 99},
	 {3, 120}		/* 5.x GHz, lowest power */
	}
};
1315
1316static inline u8
1317il3945_hw_reg_fix_power_idx(int idx)
1318{
1319 if (idx < 0)
1320 return 0;
1321 if (idx >= IL_MAX_GAIN_ENTRIES)
1322 return IL_MAX_GAIN_ENTRIES - 1;
1323 return (u8) idx;
1324}
1325
1326/* Kick off thermal recalibration check every 60 seconds */
1327#define REG_RECALIB_PERIOD (60)
1328
1329/**
1330 * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
1331 *
1332 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
1333 * or 6 Mbit (OFDM) rates.
1334 */
1335static void
1336il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
1337 const s8 *clip_pwrs,
1338 struct il_channel_info *ch_info, int band_idx)
1339{
1340 struct il3945_scan_power_info *scan_power_info;
1341 s8 power;
1342 u8 power_idx;
1343
1344 scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
1345
1346 /* use this channel group's 6Mbit clipping/saturation pwr,
1347 * but cap at regulatory scan power restriction (set during init
1348 * based on eeprom channel data) for this channel. */
1349 power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
1350
1351 power = min(power, il->tx_power_user_lmt);
1352 scan_power_info->requested_power = power;
1353
1354 /* find difference between new scan *power* and current "normal"
1355 * Tx *power* for 6Mb. Use this difference (x2) to adjust the
1356 * current "normal" temperature-compensated Tx power *idx* for
1357 * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
1358 * *idx*. */
1359 power_idx =
1360 ch_info->power_info[rate_idx].power_table_idx - (power -
1361 ch_info->
1362 power_info
1363 [RATE_6M_IDX_TBL].
1364 requested_power) *
1365 2;
1366
1367 /* store reference idx that we use when adjusting *all* scan
1368 * powers. So we can accommodate user (all channel) or spectrum
1369 * management (single channel) power changes "between" temperature
1370 * feedback compensation procedures.
1371 * don't force fit this reference idx into gain table; it may be a
1372 * negative number. This will help avoid errors when we're at
1373 * the lower bounds (highest gains, for warmest temperatures)
1374 * of the table. */
1375
1376 /* don't exceed table bounds for "real" setting */
1377 power_idx = il3945_hw_reg_fix_power_idx(power_idx);
1378
1379 scan_power_info->power_table_idx = power_idx;
1380 scan_power_info->tpc.tx_gain =
1381 power_gain_table[band_idx][power_idx].tx_gain;
1382 scan_power_info->tpc.dsp_atten =
1383 power_gain_table[band_idx][power_idx].dsp_atten;
1384}
1385
1386/**
1387 * il3945_send_tx_power - fill in Tx Power command with gain settings
1388 *
1389 * Configures power settings for all rates for the current channel,
1390 * using values from channel info struct, and send to NIC
1391 */
1392static int
1393il3945_send_tx_power(struct il_priv *il)
1394{
1395 int rate_idx, i;
1396 const struct il_channel_info *ch_info = NULL;
1397 struct il3945_txpowertable_cmd txpower = {
1398 .channel = il->ctx.active.channel,
1399 };
1400 u16 chan;
1401
1402 if (WARN_ONCE
1403 (test_bit(S_SCAN_HW, &il->status),
1404 "TX Power requested while scanning!\n"))
1405 return -EAGAIN;
1406
1407 chan = le16_to_cpu(il->ctx.active.channel);
1408
1409 txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1410 ch_info = il_get_channel_info(il, il->band, chan);
1411 if (!ch_info) {
1412 IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
1413 il->band);
1414 return -EINVAL;
1415 }
1416
1417 if (!il_is_channel_valid(ch_info)) {
1418 D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n");
1419 return 0;
1420 }
1421
1422 /* fill cmd with power settings for all rates for current channel */
1423 /* Fill OFDM rate */
1424 for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
1425 rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
1426
1427 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1428 txpower.power[i].rate = il3945_rates[rate_idx].plcp;
1429
1430 D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1431 le16_to_cpu(txpower.channel), txpower.band,
1432 txpower.power[i].tpc.tx_gain,
1433 txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
1434 }
1435 /* Fill CCK rates */
1436 for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
1437 rate_idx++, i++) {
1438 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1439 txpower.power[i].rate = il3945_rates[rate_idx].plcp;
1440
1441 D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1442 le16_to_cpu(txpower.channel), txpower.band,
1443 txpower.power[i].tpc.tx_gain,
1444 txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
1445 }
1446
1447 return il_send_cmd_pdu(il, C_TX_PWR_TBL,
1448 sizeof(struct il3945_txpowertable_cmd),
1449 &txpower);
1450
1451}
1452
1453/**
1454 * il3945_hw_reg_set_new_power - Configures power tables at new levels
1455 * @ch_info: Channel to update. Uses power_info.requested_power.
1456 *
1457 * Replace requested_power and base_power_idx ch_info fields for
1458 * one channel.
1459 *
1460 * Called if user or spectrum management changes power preferences.
1461 * Takes into account h/w and modulation limitations (clip power).
1462 *
1463 * This does *not* send anything to NIC, just sets up ch_info for one channel.
1464 *
1465 * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
1466 * properly fill out the scan powers, and actual h/w gain settings,
1467 * and send changes to NIC
1468 */
1469static int
1470il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
1471{
1472 struct il3945_channel_power_info *power_info;
1473 int power_changed = 0;
1474 int i;
1475 const s8 *clip_pwrs;
1476 int power;
1477
1478 /* Get this chnlgrp's rate-to-max/clip-powers table */
1479 clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
1480
1481 /* Get this channel's rate-to-current-power settings table */
1482 power_info = ch_info->power_info;
1483
1484 /* update OFDM Txpower settings */
1485 for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
1486 int delta_idx;
1487
1488 /* limit new power to be no more than h/w capability */
1489 power = min(ch_info->curr_txpow, clip_pwrs[i]);
1490 if (power == power_info->requested_power)
1491 continue;
1492
1493 /* find difference between old and new requested powers,
1494 * update base (non-temp-compensated) power idx */
1495 delta_idx = (power - power_info->requested_power) * 2;
1496 power_info->base_power_idx -= delta_idx;
1497
1498 /* save new requested power value */
1499 power_info->requested_power = power;
1500
1501 power_changed = 1;
1502 }
1503
1504 /* update CCK Txpower settings, based on OFDM 12M setting ...
1505 * ... all CCK power settings for a given channel are the *same*. */
1506 if (power_changed) {
1507 power =
1508 ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
1509 IL_CCK_FROM_OFDM_POWER_DIFF;
1510
1511 /* do all CCK rates' il3945_channel_power_info structures */
1512 for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
1513 power_info->requested_power = power;
1514 power_info->base_power_idx =
1515 ch_info->power_info[RATE_12M_IDX_TBL].
1516 base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
1517 ++power_info;
1518 }
1519 }
1520
1521 return 0;
1522}
1523
1524/**
1525 * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
1526 *
1527 * NOTE: Returned power limit may be less (but not more) than requested,
1528 * based strictly on regulatory (eeprom and spectrum mgt) limitations
1529 * (no consideration for h/w clipping limitations).
1530 */
1531static int
1532il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
1533{
1534 s8 max_power;
1535
1536#if 0
1537 /* if we're using TGd limits, use lower of TGd or EEPROM */
1538 if (ch_info->tgd_data.max_power != 0)
1539 max_power =
1540 min(ch_info->tgd_data.max_power,
1541 ch_info->eeprom.max_power_avg);
1542
1543 /* else just use EEPROM limits */
1544 else
1545#endif
1546 max_power = ch_info->eeprom.max_power_avg;
1547
1548 return min(max_power, ch_info->max_power_avg);
1549}
1550
1551/**
1552 * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
1553 *
1554 * Compensate txpower settings of *all* channels for temperature.
1555 * This only accounts for the difference between current temperature
1556 * and the factory calibration temperatures, and bases the new settings
1557 * on the channel's base_power_idx.
1558 *
1559 * If RxOn is "associated", this sends the new Txpower to NIC!
1560 */
1561static int
1562il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
1563{
1564 struct il_channel_info *ch_info = NULL;
1565 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1566 int delta_idx;
1567 const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
1568 u8 a_band;
1569 u8 rate_idx;
1570 u8 scan_tbl_idx;
1571 u8 i;
1572 int ref_temp;
1573 int temperature = il->temperature;
1574
1575 if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
1576 /* do not perform tx power calibration */
1577 return 0;
1578 }
1579 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1580 for (i = 0; i < il->channel_count; i++) {
1581 ch_info = &il->channel_info[i];
1582 a_band = il_is_channel_a_band(ch_info);
1583
1584 /* Get this chnlgrp's factory calibration temperature */
1585 ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
1586
1587 /* get power idx adjustment based on current and factory
1588 * temps */
1589 delta_idx =
1590 il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
1591
1592 /* set tx power value for all rates, OFDM and CCK */
1593 for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
1594 int power_idx =
1595 ch_info->power_info[rate_idx].base_power_idx;
1596
1597 /* temperature compensate */
1598 power_idx += delta_idx;
1599
1600 /* stay within table range */
1601 power_idx = il3945_hw_reg_fix_power_idx(power_idx);
1602 ch_info->power_info[rate_idx].power_table_idx =
1603 (u8) power_idx;
1604 ch_info->power_info[rate_idx].tpc =
1605 power_gain_table[a_band][power_idx];
1606 }
1607
1608 /* Get this chnlgrp's rate-to-max/clip-powers table */
1609 clip_pwrs =
1610 il->_3945.clip_groups[ch_info->group_idx].clip_powers;
1611
1612 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1613 for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
1614 scan_tbl_idx++) {
1615 s32 actual_idx =
1616 (scan_tbl_idx ==
1617 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
1618 il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
1619 actual_idx, clip_pwrs,
1620 ch_info, a_band);
1621 }
1622 }
1623
1624 /* send Txpower command for current channel to ucode */
1625 return il->cfg->ops->lib->send_tx_power(il);
1626}
1627
1628int
1629il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
1630{
1631 struct il_channel_info *ch_info;
1632 s8 max_power;
1633 u8 a_band;
1634 u8 i;
1635
1636 if (il->tx_power_user_lmt == power) {
1637 D_POWER("Requested Tx power same as current " "limit: %ddBm.\n",
1638 power);
1639 return 0;
1640 }
1641
1642 D_POWER("Setting upper limit clamp to %ddBm.\n", power);
1643 il->tx_power_user_lmt = power;
1644
1645 /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1646
1647 for (i = 0; i < il->channel_count; i++) {
1648 ch_info = &il->channel_info[i];
1649 a_band = il_is_channel_a_band(ch_info);
1650
1651 /* find minimum power of all user and regulatory constraints
1652 * (does not consider h/w clipping limitations) */
1653 max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
1654 max_power = min(power, max_power);
1655 if (max_power != ch_info->curr_txpow) {
1656 ch_info->curr_txpow = max_power;
1657
1658 /* this considers the h/w clipping limitations */
1659 il3945_hw_reg_set_new_power(il, ch_info);
1660 }
1661 }
1662
1663 /* update txpower settings for all channels,
1664 * send to NIC if associated. */
1665 il3945_is_temp_calib_needed(il);
1666 il3945_hw_reg_comp_txpower_temp(il);
1667
1668 return 0;
1669}
1670
/*
 * il3945_send_rxon_assoc - send RXON_ASSOC to update filter/rate flags
 *
 * Sends only the "association" subset of the RXON configuration (flags,
 * filter flags, basic rates).  Skipped entirely (returns 0) when the
 * staging and active configs already agree on those fields, so no
 * unnecessary command traffic is generated.
 *
 * Returns 0 on success or if nothing needed sending; -EIO if the uCode
 * reports the command failed; otherwise the error from the sync send.
 */
static int
il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
{
	int rc = 0;
	struct il_rx_pkt *pkt;
	struct il3945_rxon_assoc_cmd rxon_assoc;
	struct il_host_cmd cmd = {
		.id = C_RXON_ASSOC,
		.len = sizeof(rxon_assoc),
		.flags = CMD_WANT_SKB,	/* keep reply so status can be checked */
		.data = &rxon_assoc,
	};
	const struct il_rxon_cmd *rxon1 = &ctx->staging;
	const struct il_rxon_cmd *rxon2 = &ctx->active;

	/* nothing to do if the assoc-relevant fields are unchanged */
	if (rxon1->flags == rxon2->flags &&
	    rxon1->filter_flags == rxon2->filter_flags &&
	    rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
	    rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
		D_INFO("Using current RXON_ASSOC.  Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = ctx->staging.flags;
	rxon_assoc.filter_flags = ctx->staging.filter_flags;
	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
	rxon_assoc.reserved = 0;

	rc = il_send_cmd_sync(il, &cmd);
	if (rc)
		return rc;

	/* check uCode's verdict in the reply packet */
	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_RXON_ASSOC command\n");
		rc = -EIO;
	}

	il_free_pages(il, cmd.reply_page);

	return rc;
}
1714
1715/**
1716 * il3945_commit_rxon - commit staging_rxon to hardware
1717 *
1718 * The RXON command in staging_rxon is committed to the hardware and
1719 * the active_rxon structure is updated with the new data. This
1720 * function correctly transitions out of the RXON_ASSOC_MSK state if
1721 * a HW tune is required based on the RXON structure changes.
1722 */
1723int
1724il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
1725{
1726 /* cast away the const for active_rxon in this function */
1727 struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active;
1728 struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
1729 int rc = 0;
1730 bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
1731
1732 if (test_bit(S_EXIT_PENDING, &il->status))
1733 return -EINVAL;
1734
1735 if (!il_is_alive(il))
1736 return -1;
1737
1738 /* always get timestamp with Rx frame */
1739 staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
1740
1741 /* select antenna */
1742 staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1743 staging_rxon->flags |= il3945_get_antenna_flags(il);
1744
1745 rc = il_check_rxon_cmd(il, ctx);
1746 if (rc) {
1747 IL_ERR("Invalid RXON configuration. Not committing.\n");
1748 return -EINVAL;
1749 }
1750
1751 /* If we don't need to send a full RXON, we can use
1752 * il3945_rxon_assoc_cmd which is used to reconfigure filter
1753 * and other flags for the current radio configuration. */
1754 if (!il_full_rxon_required(il, &il->ctx)) {
1755 rc = il_send_rxon_assoc(il, &il->ctx);
1756 if (rc) {
1757 IL_ERR("Error setting RXON_ASSOC "
1758 "configuration (%d).\n", rc);
1759 return rc;
1760 }
1761
1762 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1763 /*
1764 * We do not commit tx power settings while channel changing,
1765 * do it now if tx power changed.
1766 */
1767 il_set_tx_power(il, il->tx_power_next, false);
1768 return 0;
1769 }
1770
1771 /* If we are currently associated and the new config requires
1772 * an RXON_ASSOC and the new config wants the associated mask enabled,
1773 * we must clear the associated from the active configuration
1774 * before we apply the new config */
1775 if (il_is_associated(il) && new_assoc) {
1776 D_INFO("Toggling associated bit on current RXON\n");
1777 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1778
1779 /*
1780 * reserved4 and 5 could have been filled by the iwlcore code.
1781 * Let's clear them before pushing to the 3945.
1782 */
1783 active_rxon->reserved4 = 0;
1784 active_rxon->reserved5 = 0;
1785 rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
1786 &il->ctx.active);
1787
1788 /* If the mask clearing failed then we set
1789 * active_rxon back to what it was previously */
1790 if (rc) {
1791 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1792 IL_ERR("Error clearing ASSOC_MSK on current "
1793 "configuration (%d).\n", rc);
1794 return rc;
1795 }
1796 il_clear_ucode_stations(il, &il->ctx);
1797 il_restore_stations(il, &il->ctx);
1798 }
1799
1800 D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
1801 "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
1802 le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr);
1803
1804 /*
1805 * reserved4 and 5 could have been filled by the iwlcore code.
1806 * Let's clear them before pushing to the 3945.
1807 */
1808 staging_rxon->reserved4 = 0;
1809 staging_rxon->reserved5 = 0;
1810
1811 il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto);
1812
1813 /* Apply the new configuration */
1814 rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
1815 staging_rxon);
1816 if (rc) {
1817 IL_ERR("Error setting new configuration (%d).\n", rc);
1818 return rc;
1819 }
1820
1821 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1822
1823 if (!new_assoc) {
1824 il_clear_ucode_stations(il, &il->ctx);
1825 il_restore_stations(il, &il->ctx);
1826 }
1827
1828 /* If we issue a new RXON command which required a tune then we must
1829 * send a new TXPOWER command or we won't be able to Tx any frames */
1830 rc = il_set_tx_power(il, il->tx_power_next, true);
1831 if (rc) {
1832 IL_ERR("Error setting Tx power (%d).\n", rc);
1833 return rc;
1834 }
1835
1836 /* Init the hardware's rate fallback order based on the band */
1837 rc = il3945_init_hw_rate_table(il);
1838 if (rc) {
1839 IL_ERR("Error setting HW rate table: %02X\n", rc);
1840 return -EIO;
1841 }
1842
1843 return 0;
1844}
1845
1846/**
1847 * il3945_reg_txpower_periodic - called when time to check our temperature.
1848 *
1849 * -- reset periodic timer
1850 * -- see if temp has changed enough to warrant re-calibration ... if so:
1851 * -- correct coeffs for temp (can reset temp timer)
1852 * -- save this temp as "last",
1853 * -- send new set of gain settings to NIC
1854 * NOTE: This should continue working, even when we're not associated,
1855 * so we can keep our internal table of scan powers current. */
1856void
1857il3945_reg_txpower_periodic(struct il_priv *il)
1858{
1859 /* This will kick in the "brute force"
1860 * il3945_hw_reg_comp_txpower_temp() below */
1861 if (!il3945_is_temp_calib_needed(il))
1862 goto reschedule;
1863
1864 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
1865 * This is based *only* on current temperature,
1866 * ignoring any previous power measurements */
1867 il3945_hw_reg_comp_txpower_temp(il);
1868
1869reschedule:
1870 queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
1871 REG_RECALIB_PERIOD * HZ);
1872}
1873
/* Workqueue handler for the periodic thermal recalibration: bails out
 * during driver teardown, otherwise runs the temperature check with
 * il->mutex held. */
static void
il3945_bg_reg_txpower_periodic(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv,
					  _3945.thermal_periodic.work);

	/* don't touch the device while shutdown is in progress */
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	mutex_lock(&il->mutex);
	il3945_reg_txpower_periodic(il);
	mutex_unlock(&il->mutex);
}
1887
1888/**
1889 * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
1890 *
1891 * This function is used when initializing channel-info structs.
1892 *
1893 * NOTE: These channel groups do *NOT* match the bands above!
1894 * These channel groups are based on factory-tested channels;
1895 * on A-band, EEPROM's "group frequency" entries represent the top
1896 * channel in each group 1-4. Group 5 All B/G channels are in group 0.
1897 */
1898static u16
1899il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
1900 const struct il_channel_info *ch_info)
1901{
1902 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1903 struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
1904 u8 group;
1905 u16 group_idx = 0; /* based on factory calib frequencies */
1906 u8 grp_channel;
1907
1908 /* Find the group idx for the channel ... don't use idx 1(?) */
1909 if (il_is_channel_a_band(ch_info)) {
1910 for (group = 1; group < 5; group++) {
1911 grp_channel = ch_grp[group].group_channel;
1912 if (ch_info->channel <= grp_channel) {
1913 group_idx = group;
1914 break;
1915 }
1916 }
1917 /* group 4 has a few channels *above* its factory cal freq */
1918 if (group == 5)
1919 group_idx = 4;
1920 } else
1921 group_idx = 0; /* 2.4 GHz, group 0 */
1922
1923 D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
1924 return group_idx;
1925}
1926
1927/**
1928 * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
1929 *
1930 * Interpolate to get nominal (i.e. at factory calibration temperature) idx
1931 * into radio/DSP gain settings table for requested power.
1932 */
1933static int
1934il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
1935 s32 setting_idx, s32 *new_idx)
1936{
1937 const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
1938 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1939 s32 idx0, idx1;
1940 s32 power = 2 * requested_power;
1941 s32 i;
1942 const struct il3945_eeprom_txpower_sample *samples;
1943 s32 gains0, gains1;
1944 s32 res;
1945 s32 denominator;
1946
1947 chnl_grp = &eeprom->groups[setting_idx];
1948 samples = chnl_grp->samples;
1949 for (i = 0; i < 5; i++) {
1950 if (power == samples[i].power) {
1951 *new_idx = samples[i].gain_idx;
1952 return 0;
1953 }
1954 }
1955
1956 if (power > samples[1].power) {
1957 idx0 = 0;
1958 idx1 = 1;
1959 } else if (power > samples[2].power) {
1960 idx0 = 1;
1961 idx1 = 2;
1962 } else if (power > samples[3].power) {
1963 idx0 = 2;
1964 idx1 = 3;
1965 } else {
1966 idx0 = 3;
1967 idx1 = 4;
1968 }
1969
1970 denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
1971 if (denominator == 0)
1972 return -EINVAL;
1973 gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
1974 gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
1975 res =
1976 gains0 + (gains1 - gains0) * ((s32) power -
1977 (s32) samples[idx0].power) /
1978 denominator + (1 << 18);
1979 *new_idx = res >> 19;
1980 return 0;
1981}
1982
1983static void
1984il3945_hw_reg_init_channel_groups(struct il_priv *il)
1985{
1986 u32 i;
1987 s32 rate_idx;
1988 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1989 const struct il3945_eeprom_txpower_group *group;
1990
1991 D_POWER("Initializing factory calib info from EEPROM\n");
1992
1993 for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
1994 s8 *clip_pwrs; /* table of power levels for each rate */
1995 s8 satur_pwr; /* saturation power for each chnl group */
1996 group = &eeprom->groups[i];
1997
1998 /* sanity check on factory saturation power value */
1999 if (group->saturation_power < 40) {
2000 IL_WARN("Error: saturation power is %d, "
2001 "less than minimum expected 40\n",
2002 group->saturation_power);
2003 return;
2004 }
2005
2006 /*
2007 * Derive requested power levels for each rate, based on
2008 * hardware capabilities (saturation power for band).
2009 * Basic value is 3dB down from saturation, with further
2010 * power reductions for highest 3 data rates. These
2011 * backoffs provide headroom for high rate modulation
2012 * power peaks, without too much distortion (clipping).
2013 */
2014 /* we'll fill in this array with h/w max power levels */
2015 clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
2016
2017 /* divide factory saturation power by 2 to find -3dB level */
2018 satur_pwr = (s8) (group->saturation_power >> 1);
2019
2020 /* fill in channel group's nominal powers for each rate */
2021 for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
2022 rate_idx++, clip_pwrs++) {
2023 switch (rate_idx) {
2024 case RATE_36M_IDX_TBL:
2025 if (i == 0) /* B/G */
2026 *clip_pwrs = satur_pwr;
2027 else /* A */
2028 *clip_pwrs = satur_pwr - 5;
2029 break;
2030 case RATE_48M_IDX_TBL:
2031 if (i == 0)
2032 *clip_pwrs = satur_pwr - 7;
2033 else
2034 *clip_pwrs = satur_pwr - 10;
2035 break;
2036 case RATE_54M_IDX_TBL:
2037 if (i == 0)
2038 *clip_pwrs = satur_pwr - 9;
2039 else
2040 *clip_pwrs = satur_pwr - 12;
2041 break;
2042 default:
2043 *clip_pwrs = satur_pwr;
2044 break;
2045 }
2046 }
2047 }
2048}
2049
2050/**
2051 * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
2052 *
2053 * Second pass (during init) to set up il->channel_info
2054 *
2055 * Set up Tx-power settings in our channel info database for each VALID
2056 * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
2057 * and current temperature.
2058 *
2059 * Since this is based on current temperature (at init time), these values may
2060 * not be valid for very long, but it gives us a starting/default point,
2061 * and allows us to active (i.e. using Tx) scan.
2062 *
2063 * This does *not* write values to NIC, just sets up our internal table.
2064 */
2065int
2066il3945_txpower_set_from_eeprom(struct il_priv *il)
2067{
2068 struct il_channel_info *ch_info = NULL;
2069 struct il3945_channel_power_info *pwr_info;
2070 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
2071 int delta_idx;
2072 u8 rate_idx;
2073 u8 scan_tbl_idx;
2074 const s8 *clip_pwrs; /* array of power levels for each rate */
2075 u8 gain, dsp_atten;
2076 s8 power;
2077 u8 pwr_idx, base_pwr_idx, a_band;
2078 u8 i;
2079 int temperature;
2080
2081 /* save temperature reference,
2082 * so we can determine next time to calibrate */
2083 temperature = il3945_hw_reg_txpower_get_temperature(il);
2084 il->last_temperature = temperature;
2085
2086 il3945_hw_reg_init_channel_groups(il);
2087
2088 /* initialize Tx power info for each and every channel, 2.4 and 5.x */
2089 for (i = 0, ch_info = il->channel_info; i < il->channel_count;
2090 i++, ch_info++) {
2091 a_band = il_is_channel_a_band(ch_info);
2092 if (!il_is_channel_valid(ch_info))
2093 continue;
2094
2095 /* find this channel's channel group (*not* "band") idx */
2096 ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);
2097
2098 /* Get this chnlgrp's rate->max/clip-powers table */
2099 clip_pwrs =
2100 il->_3945.clip_groups[ch_info->group_idx].clip_powers;
2101
2102 /* calculate power idx *adjustment* value according to
2103 * diff between current temperature and factory temperature */
2104 delta_idx =
2105 il3945_hw_reg_adjust_power_by_temp(temperature,
2106 eeprom->groups[ch_info->
2107 group_idx].
2108 temperature);
2109
2110 D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
2111 delta_idx, temperature + IL_TEMP_CONVERT);
2112
2113 /* set tx power value for all OFDM rates */
2114 for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
2115 s32 uninitialized_var(power_idx);
2116 int rc;
2117
2118 /* use channel group's clip-power table,
2119 * but don't exceed channel's max power */
2120 s8 pwr = min(ch_info->max_power_avg,
2121 clip_pwrs[rate_idx]);
2122
2123 pwr_info = &ch_info->power_info[rate_idx];
2124
2125 /* get base (i.e. at factory-measured temperature)
2126 * power table idx for this rate's power */
2127 rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
2128 ch_info->
2129 group_idx,
2130 &power_idx);
2131 if (rc) {
2132 IL_ERR("Invalid power idx\n");
2133 return rc;
2134 }
2135 pwr_info->base_power_idx = (u8) power_idx;
2136
2137 /* temperature compensate */
2138 power_idx += delta_idx;
2139
2140 /* stay within range of gain table */
2141 power_idx = il3945_hw_reg_fix_power_idx(power_idx);
2142
2143 /* fill 1 OFDM rate's il3945_channel_power_info struct */
2144 pwr_info->requested_power = pwr;
2145 pwr_info->power_table_idx = (u8) power_idx;
2146 pwr_info->tpc.tx_gain =
2147 power_gain_table[a_band][power_idx].tx_gain;
2148 pwr_info->tpc.dsp_atten =
2149 power_gain_table[a_band][power_idx].dsp_atten;
2150 }
2151
2152 /* set tx power for CCK rates, based on OFDM 12 Mbit settings */
2153 pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
2154 power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
2155 pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
2156 base_pwr_idx =
2157 pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
2158
2159 /* stay within table range */
2160 pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
2161 gain = power_gain_table[a_band][pwr_idx].tx_gain;
2162 dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;
2163
2164 /* fill each CCK rate's il3945_channel_power_info structure
2165 * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
2166 * NOTE: CCK rates start at end of OFDM rates! */
2167 for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
2168 pwr_info =
2169 &ch_info->power_info[rate_idx + IL_OFDM_RATES];
2170 pwr_info->requested_power = power;
2171 pwr_info->power_table_idx = pwr_idx;
2172 pwr_info->base_power_idx = base_pwr_idx;
2173 pwr_info->tpc.tx_gain = gain;
2174 pwr_info->tpc.dsp_atten = dsp_atten;
2175 }
2176
2177 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
2178 for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
2179 scan_tbl_idx++) {
2180 s32 actual_idx =
2181 (scan_tbl_idx ==
2182 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
2183 il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
2184 actual_idx, clip_pwrs,
2185 ch_info, a_band);
2186 }
2187 }
2188
2189 return 0;
2190}
2191
2192int
2193il3945_hw_rxq_stop(struct il_priv *il)
2194{
2195 int rc;
2196
2197 il_wr(il, FH39_RCSR_CONFIG(0), 0);
2198 rc = il_poll_bit(il, FH39_RSSR_STATUS,
2199 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2200 if (rc < 0)
2201 IL_ERR("Can't stop Rx DMA.\n");
2202
2203 return 0;
2204}
2205
/*
 * il3945_hw_tx_queue_init - point the device at a Tx queue's TFD ring
 *
 * Publishes the queue's DMA base address in the shared area, resets the
 * queue's byte-count registers, and enables the Tx DMA channel.  The
 * final read forces the preceding posted writes out to the device.
 *
 * Always returns 0.
 */
int
il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
{
	int txq_id = txq->q.id;

	struct il3945_shared *shared_data = il->_3945.shared_virt;

	/* tell the device where this queue's TFD ring lives */
	shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);

	/* reset byte-count circular buffer control/base */
	il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
	il_wr(il, FH39_CBCC_BASE(txq_id), 0);

	/* configure and enable the Tx DMA channel */
	il_wr(il, FH39_TCSR_CONFIG(txq_id),
	      FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
	      FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
	      FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
	      FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
	      FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);

	/* fake read to flush all prev. writes */
	_il_rd(il, FH39_TSSR_CBB_BASE);

	return 0;
}
2230
2231/*
2232 * HCMD utils
2233 */
2234static u16
2235il3945_get_hcmd_size(u8 cmd_id, u16 len)
2236{
2237 switch (cmd_id) {
2238 case C_RXON:
2239 return sizeof(struct il3945_rxon_cmd);
2240 case C_POWER_TBL:
2241 return sizeof(struct il3945_powertable_cmd);
2242 default:
2243 return len;
2244 }
2245}
2246
/*
 * il3945_build_addsta_hcmd - serialize an ADD_STA command in 3945 layout
 *
 * Copies the generic il_addsta_cmd fields into the device-specific
 * il3945_addsta_cmd at @data, forcing tid_disable_tx to 0.
 *
 * NOTE(review): the key field is copied with sizeof(struct
 * il4965_keyinfo) — presumably the 3945 and 4965 keyinfo layouts match;
 * confirm against the command struct definitions.
 *
 * Returns the number of bytes written (the 3945 command size).
 */
static u16
il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
{
	struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
	addsta->mode = cmd->mode;
	memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
	memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
	addsta->station_flags = cmd->station_flags;
	addsta->station_flags_msk = cmd->station_flags_msk;
	addsta->tid_disable_tx = cpu_to_le16(0);	/* 3945 has no per-TID disable */
	addsta->rate_n_flags = cmd->rate_n_flags;
	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;

	return (u16) sizeof(struct il3945_addsta_cmd);
}
2264
2265static int
2266il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
2267{
2268 struct il_rxon_context *ctx = &il->ctx;
2269 int ret;
2270 u8 sta_id;
2271 unsigned long flags;
2272
2273 if (sta_id_r)
2274 *sta_id_r = IL_INVALID_STATION;
2275
2276 ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
2277 if (ret) {
2278 IL_ERR("Unable to add station %pM\n", addr);
2279 return ret;
2280 }
2281
2282 if (sta_id_r)
2283 *sta_id_r = sta_id;
2284
2285 spin_lock_irqsave(&il->sta_lock, flags);
2286 il->stations[sta_id].used |= IL_STA_LOCAL;
2287 spin_unlock_irqrestore(&il->sta_lock, flags);
2288
2289 return 0;
2290}
2291
2292static int
2293il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
2294 bool add)
2295{
2296 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
2297 int ret;
2298
2299 if (add) {
2300 ret =
2301 il3945_add_bssid_station(il, vif->bss_conf.bssid,
2302 &vif_priv->ibss_bssid_sta_id);
2303 if (ret)
2304 return ret;
2305
2306 il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
2307 (il->band ==
2308 IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
2309 RATE_1M_PLCP);
2310 il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
2311
2312 return 0;
2313 }
2314
2315 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
2316 vif->bss_conf.bssid);
2317}
2318
2319/**
2320 * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
2321 */
2322int
2323il3945_init_hw_rate_table(struct il_priv *il)
2324{
2325 int rc, i, idx, prev_idx;
2326 struct il3945_rate_scaling_cmd rate_cmd = {
2327 .reserved = {0, 0, 0},
2328 };
2329 struct il3945_rate_scaling_info *table = rate_cmd.table;
2330
2331 for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
2332 idx = il3945_rates[i].table_rs_idx;
2333
2334 table[idx].rate_n_flags =
2335 il3945_hw_set_rate_n_flags(il3945_rates[i].plcp, 0);
2336 table[idx].try_cnt = il->retry_rate;
2337 prev_idx = il3945_get_prev_ieee_rate(i);
2338 table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
2339 }
2340
2341 switch (il->band) {
2342 case IEEE80211_BAND_5GHZ:
2343 D_RATE("Select A mode rate scale\n");
2344 /* If one of the following CCK rates is used,
2345 * have it fall back to the 6M OFDM rate */
2346 for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
2347 table[i].next_rate_idx =
2348 il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
2349
2350 /* Don't fall back to CCK rates */
2351 table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
2352
2353 /* Don't drop out of OFDM rates */
2354 table[RATE_6M_IDX_TBL].next_rate_idx =
2355 il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
2356 break;
2357
2358 case IEEE80211_BAND_2GHZ:
2359 D_RATE("Select B/G mode rate scale\n");
2360 /* If an OFDM rate is used, have it fall back to the
2361 * 1M CCK rates */
2362
2363 if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
2364 il_is_associated(il)) {
2365
2366 idx = IL_FIRST_CCK_RATE;
2367 for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
2368 table[i].next_rate_idx =
2369 il3945_rates[idx].table_rs_idx;
2370
2371 idx = RATE_11M_IDX_TBL;
2372 /* CCK shouldn't fall back to OFDM... */
2373 table[idx].next_rate_idx = RATE_5M_IDX_TBL;
2374 }
2375 break;
2376
2377 default:
2378 WARN_ON(1);
2379 break;
2380 }
2381
2382 /* Update the rate scaling for control frame Tx */
2383 rate_cmd.table_id = 0;
2384 rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
2385 if (rc)
2386 return rc;
2387
2388 /* Update the rate scaling for data frame Tx */
2389 rate_cmd.table_id = 1;
2390 return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
2391}
2392
/* Called when initializing driver */
/*
 * il3945_hw_set_hw_params - fill il->hw_params with 3945 constants
 *
 * Allocates the DMA-coherent shared area used to communicate Tx queue
 * base pointers to the device, then records the 3945's queue, Rx
 * buffer, station and beacon parameters.
 *
 * Returns 0 on success, -ENOMEM if the shared area cannot be allocated
 * (caller is responsible for eventually freeing it on success).
 */
int
il3945_hw_set_hw_params(struct il_priv *il)
{
	memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params));

	il->_3945.shared_virt =
	    dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
			       &il->_3945.shared_phys, GFP_KERNEL);
	if (!il->_3945.shared_virt) {
		IL_ERR("failed to allocate pci memory\n");
		return -ENOMEM;
	}

	/* Assign number of Usable TX queues */
	il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;

	il->hw_params.tfd_size = sizeof(struct il3945_tfd);
	il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	il->hw_params.max_stations = IL3945_STATION_COUNT;
	il->ctx.bcast_sta_id = IL3945_BROADCAST_ID;

	il->sta_key_max_num = STA_KEY_MAX_NUM;

	il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
	il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
	il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;

	return 0;
}
2425
/*
 * il3945_hw_get_beacon_cmd - build a TX_BEACON command in @frame
 *
 * Fills the beacon Tx command (broadcast station, infinite lifetime,
 * caller-supplied @rate) and copies the beacon frame itself in after
 * the command header.
 *
 * Returns the total command size in bytes (header + beacon frame).
 */
unsigned int
il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
			 u8 rate)
{
	struct il3945_tx_beacon_cmd *tx_beacon_cmd;
	unsigned int frame_size;

	tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* beacons go out to the broadcast station, never expire */
	tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	frame_size =
	    il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));

	BUG_ON(frame_size > MAX_MPDU_SIZE);
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);

	tx_beacon_cmd->tx.rate = rate;
	tx_beacon_cmd->tx.tx_flags =
	    (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);

	/* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */
	tx_beacon_cmd->tx.supp_rates[0] =
	    (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;

	/* supp_rates[1] carries the CCK basic rates */
	tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);

	return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
}
2458
/* Register the 3945-specific Rx/Tx notification handlers. */
void
il3945_hw_handler_setup(struct il_priv *il)
{
	il->handlers[C_TX] = il3945_hdl_tx;
	il->handlers[N_3945_RX] = il3945_hdl_rx;
}
2465
/* Set up the periodic thermal Tx-power work; torn down by
 * il3945_hw_cancel_deferred_work(). */
void
il3945_hw_setup_deferred_work(struct il_priv *il)
{
	INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
			  il3945_bg_reg_txpower_periodic);
}
2472
/* Counterpart of il3945_hw_setup_deferred_work(): stop the periodic
 * thermal Tx-power work. */
void
il3945_hw_cancel_deferred_work(struct il_priv *il)
{
	cancel_delayed_work(&il->_3945.thermal_periodic);
}
2478
/* check contents of special bootstrap uCode SRAM */
static int
il3945_verify_bsm(struct il_priv *il)
{
	__le32 *image = il->ucode_boot.v_addr;
	u32 len = il->ucode_boot.len;
	u32 reg;
	u32 val;

	D_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this read's result is immediately overwritten by
	 * the loop below; it looks like a leftover (or possibly a priming
	 * read of the periphery bus) — confirm before removing, since
	 * hardware register reads may have side effects. */
	val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
	/* Compare every SRAM word against the host copy of the image. */
	for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = il_rd_prph(il, reg);
		if (val != le32_to_cpu(*image)) {
			/* Report the offset of the first mismatching word. */
			IL_ERR("BSM uCode verification failed at "
			       "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
			       BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
			       len, val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	D_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}
2508
2509/******************************************************************************
2510 *
2511 * EEPROM related functions
2512 *
2513 ******************************************************************************/
2514
2515/*
2516 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
2517 * embedded controller) as EEPROM reader; each read is a series of pulses
2518 * to/from the EEPROM chip, not a single event, so even reads could conflict
2519 * if they weren't arbitrated by some ownership mechanism. Here, the driver
2520 * simply claims ownership, which should be safe when this function is called
2521 * (i.e. before loading uCode!).
2522 */
/* Claim EEPROM ownership for the driver by clearing the uCode owner
 * bit (see the block comment above).  Cannot fail, hence always 0. */
static int
il3945_eeprom_acquire_semaphore(struct il_priv *il)
{
	_il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
	return 0;
}
2529
/* Intentionally a no-op: the 3945 needs no explicit EEPROM release;
 * this hook only exists to satisfy the common eeprom_ops interface. */
static void
il3945_eeprom_release_semaphore(struct il_priv *il)
{
}
2535
/**
 * il3945_load_bsm - Load bootstrap instructions
 *
 * BSM operation:
 *
 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
 * in special SRAM that does not power down during RFKILL.  When powering back
 * up after power-saving sleeps (or during initial uCode load), the BSM loads
 * the bootstrap program into the on-board processor, and starts it.
 *
 * The bootstrap program loads (via DMA) instructions and data for a new
 * program from host DRAM locations indicated by the host driver in the
 * BSM_DRAM_* registers.  Once the new program is loaded, it starts
 * automatically.
 *
 * When initializing the NIC, the host driver points the BSM to the
 * "initialize" uCode image.  This uCode sets up some internal data, then
 * notifies host via "initialize alive" that it is complete.
 *
 * The host then replaces the BSM_DRAM_* pointer values to point to the
 * normal runtime uCode instructions and a backup uCode data cache buffer
 * (filled initially with starting data values for the on-board processor),
 * then triggers the "initialize" uCode to load and launch the runtime uCode,
 * which begins normal operation.
 *
 * When doing a power-save shutdown, runtime uCode saves data SRAM into
 * the backup data cache in DRAM before SRAM is powered down.
 *
 * When powering back up, the BSM loads the bootstrap program.  This reloads
 * the runtime uCode instructions and the backup data cache into SRAM,
 * and re-launches the runtime uCode from where it left off.
 */
static int
il3945_load_bsm(struct il_priv *il)
{
	__le32 *image = il->ucode_boot.v_addr;
	u32 len = il->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int rc;
	int i;
	u32 done;
	u32 reg_offset;

	D_INFO("Begin load bsm\n");

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IL39_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
	 * NOTE: il3945_initialize_alive_start() will replace these values,
	 * after the "initialize" uCode has run, to point to
	 * runtime/protocol instructions and backup data cache. */
	pinst = il->ucode_init.p_addr;
	pdata = il->ucode_init_data.p_addr;
	inst_len = il->ucode_init.len;
	data_len = il->ucode_init_data.len;

	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_il_wr_prph(il, reg_offset, le32_to_cpu(*image));

	/* Read back what we just wrote and compare against the image. */
	rc = il3945_verify_bsm(il);
	if (rc)
		return rc;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
	il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
	il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 * to prepare to load "initialize" uCode */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish (100 x 10us = ~1ms) */
	for (i = 0; i < 100; i++) {
		done = il_rd_prph(il, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		D_INFO("BSM write complete, poll %d iterations\n", i);
	else {
		IL_ERR("BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 * (e.g. when powering back up after power-save shutdown) */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	return 0;
}
2642
/* 3945 implementations of the RXON host-command hooks. */
static struct il_hcmd_ops il3945_hcmd = {
	.rxon_assoc = il3945_send_rxon_assoc,
	.commit_rxon = il3945_commit_rxon,
};
2647
/* 3945 implementations of the common library hooks. */
static struct il_lib_ops il3945_lib = {
	.txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = il3945_hw_txq_free_tfd,
	.txq_init = il3945_hw_tx_queue_init,
	.load_ucode = il3945_load_bsm,	/* uCode is loaded via the BSM */
	.dump_nic_error_log = il3945_dump_nic_error_log,
	.apm_ops = {
		.init = il3945_apm_init,
		.config = il3945_nic_config,
	},
	.eeprom_ops = {
		.regulatory_bands = {
			EEPROM_REGULATORY_BAND_1_CHANNELS,
			EEPROM_REGULATORY_BAND_2_CHANNELS,
			EEPROM_REGULATORY_BAND_3_CHANNELS,
			EEPROM_REGULATORY_BAND_4_CHANNELS,
			EEPROM_REGULATORY_BAND_5_CHANNELS,
			/* Last two slots are the HT40 bands; the 3945
			 * has no HT40 support. */
			EEPROM_REGULATORY_BAND_NO_HT40,
			EEPROM_REGULATORY_BAND_NO_HT40,
		},
		.acquire_semaphore = il3945_eeprom_acquire_semaphore,
		.release_semaphore = il3945_eeprom_release_semaphore,
	},
	.send_tx_power = il3945_send_tx_power,
	.is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,

#ifdef CONFIG_IWLEGACY_DEBUGFS
	.debugfs_ops = {
		.rx_stats_read = il3945_ucode_rx_stats_read,
		.tx_stats_read = il3945_ucode_tx_stats_read,
		.general_stats_read = il3945_ucode_general_stats_read,
	},
#endif
};
2682
/* 3945 hooks called from the shared legacy mac80211 glue. */
static const struct il_legacy_ops il3945_legacy_ops = {
	.post_associate = il3945_post_associate,
	.config_ap = il3945_config_ap,
	.manage_ibss_station = il3945_manage_ibss_station,
};
2688
/* 3945 helpers for sizing/building host commands and for scanning. */
static struct il_hcmd_utils_ops il3945_hcmd_utils = {
	.get_hcmd_size = il3945_get_hcmd_size,
	.build_addsta_hcmd = il3945_build_addsta_hcmd,
	.request_scan = il3945_request_scan,
	.post_scan = il3945_post_scan,
};
2695
/* Top-level ops bundle referenced by the il_cfg entries below. */
static const struct il_ops il3945_ops = {
	.lib = &il3945_lib,
	.hcmd = &il3945_hcmd,
	.utils = &il3945_hcmd_utils,
	.led = &il3945_led_ops,
	.legacy = &il3945_legacy_ops,
	.ieee80211_ops = &il3945_hw_ops,
};
2704
/* Hardware parameters common to every 3945 SKU. */
static struct il_base_params il3945_base_params = {
	.eeprom_size = IL3945_EEPROM_IMG_SIZE,
	.num_of_queues = IL39_NUM_QUEUES,
	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
	.set_l0s = false,
	.use_bsm = true,	/* 3945 loads uCode via the bootstrap state machine */
	.led_compensation = 64,
	.wd_timeout = IL_DEF_WD_TIMEOUT,
};
2714
/* 3945BG: 2.4 GHz-only SKU (IL_SKU_G). */
static struct il_cfg il3945_bg_cfg = {
	.name = "3945BG",
	.fw_name_pre = IL3945_FW_PRE,
	.ucode_api_max = IL3945_UCODE_API_MAX,
	.ucode_api_min = IL3945_UCODE_API_MIN,
	.sku = IL_SKU_G,
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &il3945_ops,
	.mod_params = &il3945_mod_params,
	.base_params = &il3945_base_params,
	.led_mode = IL_LED_BLINK,
};
2727
/* 3945ABG: dual-band SKU (IL_SKU_A | IL_SKU_G). */
static struct il_cfg il3945_abg_cfg = {
	.name = "3945ABG",
	.fw_name_pre = IL3945_FW_PRE,
	.ucode_api_max = IL3945_UCODE_API_MAX,
	.ucode_api_min = IL3945_UCODE_API_MIN,
	.sku = IL_SKU_A | IL_SKU_G,
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &il3945_ops,
	.mod_params = &il3945_mod_params,
	.base_params = &il3945_base_params,
	.led_mode = IL_LED_BLINK,
};
2740
2741DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = {
2742 {IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
2743 {IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
2744 {IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
2745 {IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
2746 {IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
2747 {IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
2748 {0}
2749};
2750
2751MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
new file mode 100644
index 000000000000..2b2895c544d7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -0,0 +1,626 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __il_3945_h__
28#define __il_3945_h__
29
30#include <linux/pci.h> /* for struct pci_device_id */
31#include <linux/kernel.h>
32#include <net/ieee80211_radiotap.h>
33
34/* Hardware specific file defines the PCI IDs table for that hardware module */
35extern const struct pci_device_id il3945_hw_card_ids[];
36
37#include "common.h"
38
39/* Highest firmware API version supported */
40#define IL3945_UCODE_API_MAX 2
41
42/* Lowest firmware API version supported */
43#define IL3945_UCODE_API_MIN 1
44
45#define IL3945_FW_PRE "iwlwifi-3945-"
46#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
47#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
48
49/* Default noise level to report when noise measurement is not available.
50 * This may be because we're:
51 * 1) Not associated (4965, no beacon stats being sent to driver)
52 * 2) Scanning (noise measurement does not apply to associated channel)
53 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
54 * Use default noise value of -127 ... this is below the range of measurable
55 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
56 * Also, -127 works better than 0 when averaging frames with/without
57 * noise info (e.g. averaging might be done in app); measured dBm values are
58 * always negative ... using a negative value as the default keeps all
59 * averages within an s8's (used in some apps) range of negative values. */
60#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
61
62/* Module parameters accessible from iwl-*.c */
63extern struct il_mod_params il3945_mod_params;
64
/* Per-rate statistics window used by the 3945 rate-scaling algorithm. */
struct il3945_rate_scale_data {
	u64 data;		/* history of recent attempts — presumably a
				 * success bitmap; TODO confirm at users */
	s32 success_counter;	/* successes within the window */
	s32 success_ratio;	/* successes relative to attempts */
	s32 counter;		/* total attempts within the window */
	s32 average_tpt;	/* averaged throughput for this rate */
	unsigned long stamp;	/* jiffies of last update */
};
73
/* Per-station rate-scaling state for the 3945 rate algorithm. */
struct il3945_rs_sta {
	spinlock_t lock;	/* presumably guards the state below — confirm */
	struct il_priv *il;	/* back-pointer to the driver context */
	s32 *expected_tpt;	/* expected-throughput table in use */
	unsigned long last_partial_flush;
	unsigned long last_flush;
	u32 flush_time;
	u32 last_tx_packets;
	u32 tx_packets;
	u8 tgg;
	u8 flush_pending;
	u8 start_rate;
	struct timer_list rate_scale_flush;	/* periodic window flush timer */
	struct il3945_rate_scale_data win[RATE_COUNT_3945];	/* per-rate stats */
#ifdef CONFIG_MAC80211_DEBUGFS
	struct dentry *rs_sta_dbgfs_stats_table_file;
#endif

	/* used to be in sta_info */
	int last_txrate_idx;
};
95
/*
 * The common struct MUST be first because it is shared between
 * 3945 and 4965!
 */
struct il3945_sta_priv {
	struct il_station_priv_common common;	/* shared layout; keep first */
	struct il3945_rs_sta rs_sta;		/* 3945 rate-scaling state */
};
104
/* Antenna selection modes (diversity, or force main/aux). */
enum il3945_antenna {
	IL_ANTENNA_DIVERSITY,
	IL_ANTENNA_MAIN,
	IL_ANTENNA_AUX
};
110
111/*
112 * RTS threshold here is total size [2347] minus 4 FCS bytes
113 * Per spec:
114 * a value of 0 means RTS on all data/management packets
115 * a value > max MSDU size means no RTS
116 * else RTS for data/management frames where MPDU is larger
117 * than RTS value.
118 */
119#define DEFAULT_RTS_THRESHOLD 2347U
120#define MIN_RTS_THRESHOLD 0U
121#define MAX_RTS_THRESHOLD 2347U
122#define MAX_MSDU_SIZE 2304U
123#define MAX_MPDU_SIZE 2346U
124#define DEFAULT_BEACON_INTERVAL 100U
125#define DEFAULT_SHORT_RETRY_LIMIT 7U
126#define DEFAULT_LONG_RETRY_LIMIT 4U
127
128#define IL_TX_FIFO_AC0 0
129#define IL_TX_FIFO_AC1 1
130#define IL_TX_FIFO_AC2 2
131#define IL_TX_FIFO_AC3 3
132#define IL_TX_FIFO_HCCA_1 5
133#define IL_TX_FIFO_HCCA_2 6
134#define IL_TX_FIFO_NONE 7
135
136#define IEEE80211_DATA_LEN 2304
137#define IEEE80211_4ADDR_LEN 30
138#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
139#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
140
/*
 * Scratch buffer used to build frames/commands in place (e.g. the
 * beacon Tx command, see il3945_hw_get_beacon_cmd()); chained via
 * @list (presumably onto a free list — confirm at users).
 */
struct il3945_frame {
	union {
		struct ieee80211_hdr frame;
		struct il3945_tx_beacon_cmd beacon;
		u8 raw[IEEE80211_FRAME_LEN];
		u8 cmd[360];
	} u;
	struct list_head list;
};
150
151#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
152#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
153#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
154
155#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
156#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
157#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
158
159#define IL_SUPPORTED_RATES_IE_LEN 8
160
161#define SCAN_INTERVAL 100
162
163#define MAX_TID_COUNT 9
164
165#define IL_INVALID_RATE 0xFF
166#define IL_INVALID_VALUE -1
167
168#define STA_PS_STATUS_WAKE 0
169#define STA_PS_STATUS_SLEEP 1
170
/*
 * Per-MAC sequence/fragment bookkeeping, chained via @list.
 * NOTE(review): purpose inferred from field names (IBSS duplicate
 * detection?) — confirm at the users of this struct.
 */
struct il3945_ibss_seq {
	u8 mac[ETH_ALEN];
	u16 seq_num;
	u16 frag_num;
	unsigned long packet_time;
	struct list_head list;
};
178
179#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
180 x->u.rx_frame.stats.payload + \
181 x->u.rx_frame.stats.phy_count))
182#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
183 IL_RX_HDR(x)->payload + \
184 le16_to_cpu(IL_RX_HDR(x)->len)))
185#define IL_RX_STATS(x) (&x->u.rx_frame.stats)
186#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
187
188/******************************************************************************
189 *
190 * Functions implemented in iwl3945-base.c which are forward declared here
191 * for use by iwl-*.c
192 *
193 *****************************************************************************/
194extern int il3945_calc_db_from_ratio(int sig_ratio);
195extern void il3945_rx_replenish(void *data);
196extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
197extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
198 struct ieee80211_hdr *hdr,
199 int left);
200extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
201 char **buf, bool display);
202extern void il3945_dump_nic_error_log(struct il_priv *il);
203
204/******************************************************************************
205 *
206 * Functions implemented in iwl-[34]*.c which are forward declared here
207 * for use by iwl3945-base.c
208 *
209 * NOTE: The implementation of these functions are hardware specific
210 * which is why they are in the hardware specific files (vs. iwl-base.c)
211 *
212 * Naming convention --
213 * il3945_ <-- Its part of iwlwifi (should be changed to il3945_)
214 * il3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
215 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
216 * il3945_bg_ <-- Called from work queue context
217 * il3945_mac_ <-- mac80211 callback
218 *
219 ****************************************************************************/
220extern void il3945_hw_handler_setup(struct il_priv *il);
221extern void il3945_hw_setup_deferred_work(struct il_priv *il);
222extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
223extern int il3945_hw_rxq_stop(struct il_priv *il);
224extern int il3945_hw_set_hw_params(struct il_priv *il);
225extern int il3945_hw_nic_init(struct il_priv *il);
226extern int il3945_hw_nic_stop_master(struct il_priv *il);
227extern void il3945_hw_txq_ctx_free(struct il_priv *il);
228extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
229extern int il3945_hw_nic_reset(struct il_priv *il);
230extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
231 struct il_tx_queue *txq,
232 dma_addr_t addr, u16 len, u8 reset,
233 u8 pad);
234extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
235extern int il3945_hw_get_temperature(struct il_priv *il);
236extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
237extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
238 struct il3945_frame *frame,
239 u8 rate);
240void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
241 struct ieee80211_tx_info *info,
242 struct ieee80211_hdr *hdr, int sta_id,
243 int tx_id);
244extern int il3945_hw_reg_send_txpower(struct il_priv *il);
245extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
246extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
247void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
248extern void il3945_disable_events(struct il_priv *il);
249extern int il4965_get_temperature(const struct il_priv *il);
250extern void il3945_post_associate(struct il_priv *il);
251extern void il3945_config_ap(struct il_priv *il);
252
253extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
254
255/**
256 * il3945_hw_find_station - Find station id for a given BSSID
257 * @bssid: MAC address of station ID to find
258 *
259 * NOTE: This should not be hardware specific but the code has
260 * not yet been merged into a single common layer for managing the
261 * station tables.
262 */
263extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
264
265extern struct ieee80211_ops il3945_hw_ops;
266
267extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
268extern int il3945_init_hw_rate_table(struct il_priv *il);
269extern void il3945_reg_txpower_periodic(struct il_priv *il);
270extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
271
272extern int il3945_rs_next_rate(struct il_priv *il, int rate);
273
274/* scanning */
275int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
276void il3945_post_scan(struct il_priv *il);
277
278/* rates */
279extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
280
281/* RSSI to dBm */
282#define IL39_RSSI_OFFSET 95
283
284/*
285 * EEPROM related constants, enums, and structures.
286 */
287#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
288
289/*
290 * Mapping of a Tx power level, at factory calibration temperature,
291 * to a radio/DSP gain table idx.
292 * One for each of 5 "sample" power levels in each band.
293 * v_det is measured at the factory, using the 3945's built-in power amplifier
294 * (PA) output voltage detector. This same detector is used during Tx of
295 * long packets in normal operation to provide feedback as to proper output
296 * level.
297 * Data copied from EEPROM.
298 * DO NOT ALTER THIS STRUCTURE!!!
299 */
300struct il3945_eeprom_txpower_sample {
301 u8 gain_idx; /* idx into power (gain) setup table ... */
302 s8 power; /* ... for this pwr level for this chnl group */
303 u16 v_det; /* PA output voltage */
304} __packed;
305
306/*
307 * Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
308 * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
309 * Tx power setup code interpolates between the 5 "sample" power levels
310 * to determine the nominal setup for a requested power level.
311 * Data copied from EEPROM.
312 * DO NOT ALTER THIS STRUCTURE!!!
313 */
314struct il3945_eeprom_txpower_group {
315 struct il3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
316 s32 a, b, c, d, e; /* coefficients for voltage->power
317 * formula (signed) */
318 s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
319 * frequency (signed) */
320 s8 saturation_power; /* highest power possible by h/w in this
321 * band */
322 u8 group_channel; /* "representative" channel # in this band */
323 s16 temperature; /* h/w temperature at factory calib this band
324 * (signed) */
325} __packed;
326
327/*
328 * Temperature-based Tx-power compensation data, not band-specific.
329 * These coefficients are use to modify a/b/c/d/e coeffs based on
330 * difference between current temperature and factory calib temperature.
331 * Data copied from EEPROM.
332 */
333struct il3945_eeprom_temperature_corr {
334 u32 Ta;
335 u32 Tb;
336 u32 Tc;
337 u32 Td;
338 u32 Te;
339} __packed;
340
341/*
342 * EEPROM map
343 */
344struct il3945_eeprom {
345 u8 reserved0[16];
346 u16 device_id; /* abs.ofs: 16 */
347 u8 reserved1[2];
348 u16 pmc; /* abs.ofs: 20 */
349 u8 reserved2[20];
350 u8 mac_address[6]; /* abs.ofs: 42 */
351 u8 reserved3[58];
352 u16 board_revision; /* abs.ofs: 106 */
353 u8 reserved4[11];
354 u8 board_pba_number[9]; /* abs.ofs: 119 */
355 u8 reserved5[8];
356 u16 version; /* abs.ofs: 136 */
357 u8 sku_cap; /* abs.ofs: 138 */
358 u8 leds_mode; /* abs.ofs: 139 */
359 u16 oem_mode;
360 u16 wowlan_mode; /* abs.ofs: 142 */
361 u16 leds_time_interval; /* abs.ofs: 144 */
362 u8 leds_off_time; /* abs.ofs: 146 */
363 u8 leds_on_time; /* abs.ofs: 147 */
364 u8 almgor_m_version; /* abs.ofs: 148 */
365 u8 antenna_switch_type; /* abs.ofs: 149 */
366 u8 reserved6[42];
367 u8 sku_id[4]; /* abs.ofs: 192 */
368
369/*
370 * Per-channel regulatory data.
371 *
372 * Each channel that *might* be supported by 3945 has a fixed location
373 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
374 * txpower (MSB).
375 *
376 * Entries immediately below are for 20 MHz channel width.
377 *
378 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
379 */
380 u16 band_1_count; /* abs.ofs: 196 */
381 struct il_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
382
383/*
384 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
385 * 5.0 GHz channels 7, 8, 11, 12, 16
386 * (4915-5080MHz) (none of these is ever supported)
387 */
388 u16 band_2_count; /* abs.ofs: 226 */
389 struct il_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
390
391/*
392 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
393 * (5170-5320MHz)
394 */
395 u16 band_3_count; /* abs.ofs: 254 */
396 struct il_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
397
398/*
399 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
400 * (5500-5700MHz)
401 */
402 u16 band_4_count; /* abs.ofs: 280 */
403 struct il_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
404
405/*
406 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
407 * (5725-5825MHz)
408 */
409 u16 band_5_count; /* abs.ofs: 304 */
410 struct il_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
411
412 u8 reserved9[194];
413
414/*
415 * 3945 Txpower calibration data.
416 */
417#define IL_NUM_TX_CALIB_GROUPS 5
418 struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
419/* abs.ofs: 512 */
420 struct il3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
421 u8 reserved16[172]; /* fill out to full 1024 byte block */
422} __packed;
423
424#define IL3945_EEPROM_IMG_SIZE 1024
425
426/* End of EEPROM */
427
428#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
429#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
430
431/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
432#define IL39_NUM_QUEUES 5
433#define IL39_CMD_QUEUE_NUM 4
434
435#define IL_DEFAULT_TX_RETRY 15
436
437/*********************************************/
438
439#define RFD_SIZE 4
440#define NUM_TFD_CHUNKS 4
441
442#define TFD_CTL_COUNT_SET(n) (n << 24)
443#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
444#define TFD_CTL_PAD_SET(n) (n << 28)
445#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
446
447/* Sizes and addresses for instruction and data memory (SRAM) in
448 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
449#define IL39_RTC_INST_LOWER_BOUND (0x000000)
450#define IL39_RTC_INST_UPPER_BOUND (0x014000)
451
452#define IL39_RTC_DATA_LOWER_BOUND (0x800000)
453#define IL39_RTC_DATA_UPPER_BOUND (0x808000)
454
455#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
456 IL39_RTC_INST_LOWER_BOUND)
457#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
458 IL39_RTC_DATA_LOWER_BOUND)
459
460#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
461#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE
462
463/* Size of uCode instruction memory in bootstrap state machine */
464#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
465
466static inline int
467il3945_hw_valid_rtc_data_addr(u32 addr)
468{
469 return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
470 addr < IL39_RTC_DATA_UPPER_BOUND);
471}
472
/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
 * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
struct il3945_shared {
	__le32 tx_base_ptr[8];	/* device-readable (little-endian) Tx
				 * circular-buffer base addresses */
} __packed;
478
479static inline u8
480il3945_hw_get_rate(__le16 rate_n_flags)
481{
482 return le16_to_cpu(rate_n_flags) & 0xFF;
483}
484
/* Return the whole rate_n_flags word converted to host byte order. */
static inline u16
il3945_hw_get_rate_n_flags(__le16 rate_n_flags)
{
	return le16_to_cpu(rate_n_flags);
}
490
491static inline __le16
492il3945_hw_set_rate_n_flags(u8 rate, u16 flags)
493{
494 return cpu_to_le16((u16) rate | flags);
495}
496
497/************************************/
498/* iwl3945 Flow Handler Definitions */
499/************************************/
500
501/**
502 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
503 * Addresses are offsets from device's PCI hardware base address.
504 */
505#define FH39_MEM_LOWER_BOUND (0x0800)
506#define FH39_MEM_UPPER_BOUND (0x1000)
507
508#define FH39_CBCC_TBL (FH39_MEM_LOWER_BOUND + 0x140)
509#define FH39_TFDB_TBL (FH39_MEM_LOWER_BOUND + 0x180)
510#define FH39_RCSR_TBL (FH39_MEM_LOWER_BOUND + 0x400)
511#define FH39_RSSR_TBL (FH39_MEM_LOWER_BOUND + 0x4c0)
512#define FH39_TCSR_TBL (FH39_MEM_LOWER_BOUND + 0x500)
513#define FH39_TSSR_TBL (FH39_MEM_LOWER_BOUND + 0x680)
514
515/* TFDB (Transmit Frame Buffer Descriptor) */
516#define FH39_TFDB(_ch, buf) (FH39_TFDB_TBL + \
517 ((_ch) * 2 + (buf)) * 0x28)
518#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TBL + 0x50 * (_ch))
519
520/* CBCC channel is [0,2] */
521#define FH39_CBCC(_ch) (FH39_CBCC_TBL + (_ch) * 0x8)
522#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
523#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
524
525/* RCSR channel is [0,2] */
526#define FH39_RCSR(_ch) (FH39_RCSR_TBL + (_ch) * 0x40)
527#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
528#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
529#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
530#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
531
532#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
533
534/* RSSR */
535#define FH39_RSSR_CTRL (FH39_RSSR_TBL + 0x000)
536#define FH39_RSSR_STATUS (FH39_RSSR_TBL + 0x004)
537
538/* TCSR */
539#define FH39_TCSR(_ch) (FH39_TCSR_TBL + (_ch) * 0x20)
540#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
541#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
542#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
543
544/* TSSR */
545#define FH39_TSSR_CBB_BASE (FH39_TSSR_TBL + 0x000)
546#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TBL + 0x008)
547#define FH39_TSSR_TX_STATUS (FH39_TSSR_TBL + 0x010)
548
549/* DBM */
550
551#define FH39_SRVC_CHNL (6)
552
553#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
554#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
555
556#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
557
558#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
559
560#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
561
562#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
563
564#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
565
566#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
567
568#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
569#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
570
571#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
572#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
573
574#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
575
576#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
577
578#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
579#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
580
581#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
582
583#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
584
585#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
586#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
587
588#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
589
590#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
591#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
592
593#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
594#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
595
596#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
597#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
598
599#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
600 (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
601 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
602
603#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
604
/* One DMA chunk of a transmit frame: physical address + length,
 * both little-endian as read by the device. */
struct il3945_tfd_tb {
	__le32 addr;
	__le32 len;
} __packed;
609
/*
 * Transmit Frame Descriptor: control word plus up to 4 buffer chunks
 * (NUM_TFD_CHUNKS), padded out to the fixed hardware slot size.
 */
struct il3945_tfd {
	__le32 control_flags;	/* chunk count/pad; see TFD_CTL_* macros */
	struct il3945_tfd_tb tbs[4];
	u8 __pad[28];
} __packed;
615
616#ifdef CONFIG_IWLEGACY_DEBUGFS
617ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
618 size_t count, loff_t *ppos);
619ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
620 size_t count, loff_t *ppos);
621ssize_t il3945_ucode_general_stats_read(struct file *file,
622 char __user *user_buf, size_t count,
623 loff_t *ppos);
624#endif
625
626#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/4965-calib.c
index 162d877e6869..d3248e3ef23b 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
+++ b/drivers/net/wireless/iwlegacy/4965-calib.c
@@ -63,15 +63,14 @@
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <net/mac80211.h> 64#include <net/mac80211.h>
65 65
66#include "iwl-dev.h" 66#include "common.h"
67#include "iwl-core.h" 67#include "4965.h"
68#include "iwl-4965-calib.h"
69 68
70/***************************************************************************** 69/*****************************************************************************
71 * INIT calibrations framework 70 * INIT calibrations framework
72 *****************************************************************************/ 71 *****************************************************************************/
73 72
74struct statistics_general_data { 73struct stats_general_data {
75 u32 beacon_silence_rssi_a; 74 u32 beacon_silence_rssi_a;
76 u32 beacon_silence_rssi_b; 75 u32 beacon_silence_rssi_b;
77 u32 beacon_silence_rssi_c; 76 u32 beacon_silence_rssi_c;
@@ -80,14 +79,15 @@ struct statistics_general_data {
80 u32 beacon_energy_c; 79 u32 beacon_energy_c;
81}; 80};
82 81
83void iwl4965_calib_free_results(struct iwl_priv *priv) 82void
83il4965_calib_free_results(struct il_priv *il)
84{ 84{
85 int i; 85 int i;
86 86
87 for (i = 0; i < IWL_CALIB_MAX; i++) { 87 for (i = 0; i < IL_CALIB_MAX; i++) {
88 kfree(priv->calib_results[i].buf); 88 kfree(il->calib_results[i].buf);
89 priv->calib_results[i].buf = NULL; 89 il->calib_results[i].buf = NULL;
90 priv->calib_results[i].buf_len = 0; 90 il->calib_results[i].buf_len = 0;
91 } 91 }
92} 92}
93 93
@@ -103,10 +103,9 @@ void iwl4965_calib_free_results(struct iwl_priv *priv)
103 * enough to receive all of our own network traffic, but not so 103 * enough to receive all of our own network traffic, but not so
104 * high that our DSP gets too busy trying to lock onto non-network 104 * high that our DSP gets too busy trying to lock onto non-network
105 * activity/noise. */ 105 * activity/noise. */
106static int iwl4965_sens_energy_cck(struct iwl_priv *priv, 106static int
107 u32 norm_fa, 107il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time,
108 u32 rx_enable_time, 108 struct stats_general_data *rx_info)
109 struct statistics_general_data *rx_info)
110{ 109{
111 u32 max_nrg_cck = 0; 110 u32 max_nrg_cck = 0;
112 int i = 0; 111 int i = 0;
@@ -129,22 +128,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
129 u32 false_alarms = norm_fa * 200 * 1024; 128 u32 false_alarms = norm_fa * 200 * 1024;
130 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; 129 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
131 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; 130 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
132 struct iwl_sensitivity_data *data = NULL; 131 struct il_sensitivity_data *data = NULL;
133 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 132 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
134 133
135 data = &(priv->sensitivity_data); 134 data = &(il->sensitivity_data);
136 135
137 data->nrg_auto_corr_silence_diff = 0; 136 data->nrg_auto_corr_silence_diff = 0;
138 137
139 /* Find max silence rssi among all 3 receivers. 138 /* Find max silence rssi among all 3 receivers.
140 * This is background noise, which may include transmissions from other 139 * This is background noise, which may include transmissions from other
141 * networks, measured during silence before our network's beacon */ 140 * networks, measured during silence before our network's beacon */
142 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & 141 silence_rssi_a =
143 ALL_BAND_FILTER) >> 8); 142 (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8);
144 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & 143 silence_rssi_b =
145 ALL_BAND_FILTER) >> 8); 144 (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8);
146 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & 145 silence_rssi_c =
147 ALL_BAND_FILTER) >> 8); 146 (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8);
148 147
149 val = max(silence_rssi_b, silence_rssi_c); 148 val = max(silence_rssi_b, silence_rssi_c);
150 max_silence_rssi = max(silence_rssi_a, (u8) val); 149 max_silence_rssi = max(silence_rssi_a, (u8) val);
@@ -160,9 +159,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
160 val = data->nrg_silence_rssi[i]; 159 val = data->nrg_silence_rssi[i];
161 silence_ref = max(silence_ref, val); 160 silence_ref = max(silence_ref, val);
162 } 161 }
163 IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n", 162 D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a,
164 silence_rssi_a, silence_rssi_b, silence_rssi_c, 163 silence_rssi_b, silence_rssi_c, silence_ref);
165 silence_ref);
166 164
167 /* Find max rx energy (min value!) among all 3 receivers, 165 /* Find max rx energy (min value!) among all 3 receivers,
168 * measured during beacon frame. 166 * measured during beacon frame.
@@ -184,9 +182,9 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
184 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); 182 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
185 max_nrg_cck += 6; 183 max_nrg_cck += 6;
186 184
187 IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", 185 D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
188 rx_info->beacon_energy_a, rx_info->beacon_energy_b, 186 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
189 rx_info->beacon_energy_c, max_nrg_cck - 6); 187 rx_info->beacon_energy_c, max_nrg_cck - 6);
190 188
191 /* Count number of consecutive beacons with fewer-than-desired 189 /* Count number of consecutive beacons with fewer-than-desired
192 * false alarms. */ 190 * false alarms. */
@@ -194,35 +192,34 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
194 data->num_in_cck_no_fa++; 192 data->num_in_cck_no_fa++;
195 else 193 else
196 data->num_in_cck_no_fa = 0; 194 data->num_in_cck_no_fa = 0;
197 IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n", 195 D_CALIB("consecutive bcns with few false alarms = %u\n",
198 data->num_in_cck_no_fa); 196 data->num_in_cck_no_fa);
199 197
200 /* If we got too many false alarms this time, reduce sensitivity */ 198 /* If we got too many false alarms this time, reduce sensitivity */
201 if ((false_alarms > max_false_alarms) && 199 if (false_alarms > max_false_alarms &&
202 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) { 200 data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
203 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n", 201 D_CALIB("norm FA %u > max FA %u\n", false_alarms,
204 false_alarms, max_false_alarms); 202 max_false_alarms);
205 IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n"); 203 D_CALIB("... reducing sensitivity\n");
206 data->nrg_curr_state = IWL_FA_TOO_MANY; 204 data->nrg_curr_state = IL_FA_TOO_MANY;
207 /* Store for "fewer than desired" on later beacon */ 205 /* Store for "fewer than desired" on later beacon */
208 data->nrg_silence_ref = silence_ref; 206 data->nrg_silence_ref = silence_ref;
209 207
210 /* increase energy threshold (reduce nrg value) 208 /* increase energy threshold (reduce nrg value)
211 * to decrease sensitivity */ 209 * to decrease sensitivity */
212 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK; 210 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
213 /* Else if we got fewer than desired, increase sensitivity */ 211 /* Else if we got fewer than desired, increase sensitivity */
214 } else if (false_alarms < min_false_alarms) { 212 } else if (false_alarms < min_false_alarms) {
215 data->nrg_curr_state = IWL_FA_TOO_FEW; 213 data->nrg_curr_state = IL_FA_TOO_FEW;
216 214
217 /* Compare silence level with silence level for most recent 215 /* Compare silence level with silence level for most recent
218 * healthy number or too many false alarms */ 216 * healthy number or too many false alarms */
219 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - 217 data->nrg_auto_corr_silence_diff =
220 (s32)silence_ref; 218 (s32) data->nrg_silence_ref - (s32) silence_ref;
221 219
222 IWL_DEBUG_CALIB(priv, 220 D_CALIB("norm FA %u < min FA %u, silence diff %d\n",
223 "norm FA %u < min FA %u, silence diff %d\n", 221 false_alarms, min_false_alarms,
224 false_alarms, min_false_alarms, 222 data->nrg_auto_corr_silence_diff);
225 data->nrg_auto_corr_silence_diff);
226 223
227 /* Increase value to increase sensitivity, but only if: 224 /* Increase value to increase sensitivity, but only if:
228 * 1a) previous beacon did *not* have *too many* false alarms 225 * 1a) previous beacon did *not* have *too many* false alarms
@@ -230,23 +227,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
230 * from a previous beacon with too many, or healthy # FAs 227 * from a previous beacon with too many, or healthy # FAs
231 * OR 2) We've seen a lot of beacons (100) with too few 228 * OR 2) We've seen a lot of beacons (100) with too few
232 * false alarms */ 229 * false alarms */
233 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && 230 if (data->nrg_prev_state != IL_FA_TOO_MANY &&
234 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || 231 (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
235 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { 232 data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
236 233
237 IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n"); 234 D_CALIB("... increasing sensitivity\n");
238 /* Increase nrg value to increase sensitivity */ 235 /* Increase nrg value to increase sensitivity */
239 val = data->nrg_th_cck + NRG_STEP_CCK; 236 val = data->nrg_th_cck + NRG_STEP_CCK;
240 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val); 237 data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val);
241 } else { 238 } else {
242 IWL_DEBUG_CALIB(priv, 239 D_CALIB("... but not changing sensitivity\n");
243 "... but not changing sensitivity\n");
244 } 240 }
245 241
246 /* Else we got a healthy number of false alarms, keep status quo */ 242 /* Else we got a healthy number of false alarms, keep status quo */
247 } else { 243 } else {
248 IWL_DEBUG_CALIB(priv, " FA in safe zone\n"); 244 D_CALIB(" FA in safe zone\n");
249 data->nrg_curr_state = IWL_FA_GOOD_RANGE; 245 data->nrg_curr_state = IL_FA_GOOD_RANGE;
250 246
251 /* Store for use in "fewer than desired" with later beacon */ 247 /* Store for use in "fewer than desired" with later beacon */
252 data->nrg_silence_ref = silence_ref; 248 data->nrg_silence_ref = silence_ref;
@@ -254,8 +250,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
254 /* If previous beacon had too many false alarms, 250 /* If previous beacon had too many false alarms,
255 * give it some extra margin by reducing sensitivity again 251 * give it some extra margin by reducing sensitivity again
256 * (but don't go below measured energy of desired Rx) */ 252 * (but don't go below measured energy of desired Rx) */
257 if (IWL_FA_TOO_MANY == data->nrg_prev_state) { 253 if (IL_FA_TOO_MANY == data->nrg_prev_state) {
258 IWL_DEBUG_CALIB(priv, "... increasing margin\n"); 254 D_CALIB("... increasing margin\n");
259 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) 255 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
260 data->nrg_th_cck -= NRG_MARGIN; 256 data->nrg_th_cck -= NRG_MARGIN;
261 else 257 else
@@ -269,7 +265,7 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
269 * Lower value is higher energy, so we use max()! 265 * Lower value is higher energy, so we use max()!
270 */ 266 */
271 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); 267 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
272 IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck); 268 D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
273 269
274 data->nrg_prev_state = data->nrg_curr_state; 270 data->nrg_prev_state = data->nrg_curr_state;
275 271
@@ -284,190 +280,187 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
284 else { 280 else {
285 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; 281 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
286 data->auto_corr_cck = 282 data->auto_corr_cck =
287 min((u32)ranges->auto_corr_max_cck, val); 283 min((u32) ranges->auto_corr_max_cck, val);
288 } 284 }
289 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; 285 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
290 data->auto_corr_cck_mrc = 286 data->auto_corr_cck_mrc =
291 min((u32)ranges->auto_corr_max_cck_mrc, val); 287 min((u32) ranges->auto_corr_max_cck_mrc, val);
292 } else if ((false_alarms < min_false_alarms) && 288 } else if (false_alarms < min_false_alarms &&
293 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || 289 (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
294 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { 290 data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
295 291
296 /* Decrease auto_corr values to increase sensitivity */ 292 /* Decrease auto_corr values to increase sensitivity */
297 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; 293 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
298 data->auto_corr_cck = 294 data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val);
299 max((u32)ranges->auto_corr_min_cck, val);
300 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; 295 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
301 data->auto_corr_cck_mrc = 296 data->auto_corr_cck_mrc =
302 max((u32)ranges->auto_corr_min_cck_mrc, val); 297 max((u32) ranges->auto_corr_min_cck_mrc, val);
303 } 298 }
304 299
305 return 0; 300 return 0;
306} 301}
307 302
308 303static int
309static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv, 304il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time)
310 u32 norm_fa,
311 u32 rx_enable_time)
312{ 305{
313 u32 val; 306 u32 val;
314 u32 false_alarms = norm_fa * 200 * 1024; 307 u32 false_alarms = norm_fa * 200 * 1024;
315 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; 308 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
316 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; 309 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
317 struct iwl_sensitivity_data *data = NULL; 310 struct il_sensitivity_data *data = NULL;
318 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 311 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
319 312
320 data = &(priv->sensitivity_data); 313 data = &(il->sensitivity_data);
321 314
322 /* If we got too many false alarms this time, reduce sensitivity */ 315 /* If we got too many false alarms this time, reduce sensitivity */
323 if (false_alarms > max_false_alarms) { 316 if (false_alarms > max_false_alarms) {
324 317
325 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n", 318 D_CALIB("norm FA %u > max FA %u)\n", false_alarms,
326 false_alarms, max_false_alarms); 319 max_false_alarms);
327 320
328 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; 321 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
329 data->auto_corr_ofdm = 322 data->auto_corr_ofdm =
330 min((u32)ranges->auto_corr_max_ofdm, val); 323 min((u32) ranges->auto_corr_max_ofdm, val);
331 324
332 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; 325 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
333 data->auto_corr_ofdm_mrc = 326 data->auto_corr_ofdm_mrc =
334 min((u32)ranges->auto_corr_max_ofdm_mrc, val); 327 min((u32) ranges->auto_corr_max_ofdm_mrc, val);
335 328
336 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; 329 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
337 data->auto_corr_ofdm_x1 = 330 data->auto_corr_ofdm_x1 =
338 min((u32)ranges->auto_corr_max_ofdm_x1, val); 331 min((u32) ranges->auto_corr_max_ofdm_x1, val);
339 332
340 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; 333 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
341 data->auto_corr_ofdm_mrc_x1 = 334 data->auto_corr_ofdm_mrc_x1 =
342 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val); 335 min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val);
343 } 336 }
344 337
345 /* Else if we got fewer than desired, increase sensitivity */ 338 /* Else if we got fewer than desired, increase sensitivity */
346 else if (false_alarms < min_false_alarms) { 339 else if (false_alarms < min_false_alarms) {
347 340
348 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n", 341 D_CALIB("norm FA %u < min FA %u\n", false_alarms,
349 false_alarms, min_false_alarms); 342 min_false_alarms);
350 343
351 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; 344 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
352 data->auto_corr_ofdm = 345 data->auto_corr_ofdm =
353 max((u32)ranges->auto_corr_min_ofdm, val); 346 max((u32) ranges->auto_corr_min_ofdm, val);
354 347
355 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; 348 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
356 data->auto_corr_ofdm_mrc = 349 data->auto_corr_ofdm_mrc =
357 max((u32)ranges->auto_corr_min_ofdm_mrc, val); 350 max((u32) ranges->auto_corr_min_ofdm_mrc, val);
358 351
359 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; 352 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
360 data->auto_corr_ofdm_x1 = 353 data->auto_corr_ofdm_x1 =
361 max((u32)ranges->auto_corr_min_ofdm_x1, val); 354 max((u32) ranges->auto_corr_min_ofdm_x1, val);
362 355
363 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; 356 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
364 data->auto_corr_ofdm_mrc_x1 = 357 data->auto_corr_ofdm_mrc_x1 =
365 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val); 358 max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val);
366 } else { 359 } else {
367 IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n", 360 D_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
368 min_false_alarms, false_alarms, max_false_alarms); 361 min_false_alarms, false_alarms, max_false_alarms);
369 } 362 }
370 return 0; 363 return 0;
371} 364}
372 365
373static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv, 366static void
374 struct iwl_sensitivity_data *data, 367il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il,
375 __le16 *tbl) 368 struct il_sensitivity_data *data,
369 __le16 *tbl)
376{ 370{
377 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = 371 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
378 cpu_to_le16((u16)data->auto_corr_ofdm); 372 cpu_to_le16((u16) data->auto_corr_ofdm);
379 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = 373 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
380 cpu_to_le16((u16)data->auto_corr_ofdm_mrc); 374 cpu_to_le16((u16) data->auto_corr_ofdm_mrc);
381 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = 375 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
382 cpu_to_le16((u16)data->auto_corr_ofdm_x1); 376 cpu_to_le16((u16) data->auto_corr_ofdm_x1);
383 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = 377 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
384 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); 378 cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1);
385 379
386 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = 380 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
387 cpu_to_le16((u16)data->auto_corr_cck); 381 cpu_to_le16((u16) data->auto_corr_cck);
388 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = 382 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
389 cpu_to_le16((u16)data->auto_corr_cck_mrc); 383 cpu_to_le16((u16) data->auto_corr_cck_mrc);
390 384
391 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = 385 tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
392 cpu_to_le16((u16)data->nrg_th_cck); 386 tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
393 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = 387
394 cpu_to_le16((u16)data->nrg_th_ofdm); 388 tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
395 389 cpu_to_le16(data->barker_corr_th_min);
396 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = 390 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
397 cpu_to_le16(data->barker_corr_th_min); 391 cpu_to_le16(data->barker_corr_th_min_mrc);
398 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = 392 tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
399 cpu_to_le16(data->barker_corr_th_min_mrc); 393
400 tbl[HD_OFDM_ENERGY_TH_IN_INDEX] = 394 D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
401 cpu_to_le16(data->nrg_th_cca); 395 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
402 396 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
403 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", 397 data->nrg_th_ofdm);
404 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, 398
405 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, 399 D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck,
406 data->nrg_th_ofdm); 400 data->auto_corr_cck_mrc, data->nrg_th_cck);
407
408 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
409 data->auto_corr_cck, data->auto_corr_cck_mrc,
410 data->nrg_th_cck);
411} 401}
412 402
413/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ 403/* Prepare a C_SENSITIVITY, send to uCode if values have changed */
414static int iwl4965_sensitivity_write(struct iwl_priv *priv) 404static int
405il4965_sensitivity_write(struct il_priv *il)
415{ 406{
416 struct iwl_sensitivity_cmd cmd; 407 struct il_sensitivity_cmd cmd;
417 struct iwl_sensitivity_data *data = NULL; 408 struct il_sensitivity_data *data = NULL;
418 struct iwl_host_cmd cmd_out = { 409 struct il_host_cmd cmd_out = {
419 .id = SENSITIVITY_CMD, 410 .id = C_SENSITIVITY,
420 .len = sizeof(struct iwl_sensitivity_cmd), 411 .len = sizeof(struct il_sensitivity_cmd),
421 .flags = CMD_ASYNC, 412 .flags = CMD_ASYNC,
422 .data = &cmd, 413 .data = &cmd,
423 }; 414 };
424 415
425 data = &(priv->sensitivity_data); 416 data = &(il->sensitivity_data);
426 417
427 memset(&cmd, 0, sizeof(cmd)); 418 memset(&cmd, 0, sizeof(cmd));
428 419
429 iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]); 420 il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
430 421
431 /* Update uCode's "work" table, and copy it to DSP */ 422 /* Update uCode's "work" table, and copy it to DSP */
432 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; 423 cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;
433 424
434 /* Don't send command to uCode if nothing has changed */ 425 /* Don't send command to uCode if nothing has changed */
435 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), 426 if (!memcmp
436 sizeof(u16)*HD_TABLE_SIZE)) { 427 (&cmd.table[0], &(il->sensitivity_tbl[0]),
437 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); 428 sizeof(u16) * HD_TBL_SIZE)) {
429 D_CALIB("No change in C_SENSITIVITY\n");
438 return 0; 430 return 0;
439 } 431 }
440 432
441 /* Copy table for comparison next time */ 433 /* Copy table for comparison next time */
442 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), 434 memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
443 sizeof(u16)*HD_TABLE_SIZE); 435 sizeof(u16) * HD_TBL_SIZE);
444 436
445 return iwl_legacy_send_cmd(priv, &cmd_out); 437 return il_send_cmd(il, &cmd_out);
446} 438}
447 439
448void iwl4965_init_sensitivity(struct iwl_priv *priv) 440void
441il4965_init_sensitivity(struct il_priv *il)
449{ 442{
450 int ret = 0; 443 int ret = 0;
451 int i; 444 int i;
452 struct iwl_sensitivity_data *data = NULL; 445 struct il_sensitivity_data *data = NULL;
453 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 446 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
454 447
455 if (priv->disable_sens_cal) 448 if (il->disable_sens_cal)
456 return; 449 return;
457 450
458 IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n"); 451 D_CALIB("Start il4965_init_sensitivity\n");
459 452
460 /* Clear driver's sensitivity algo data */ 453 /* Clear driver's sensitivity algo data */
461 data = &(priv->sensitivity_data); 454 data = &(il->sensitivity_data);
462 455
463 if (ranges == NULL) 456 if (ranges == NULL)
464 return; 457 return;
465 458
466 memset(data, 0, sizeof(struct iwl_sensitivity_data)); 459 memset(data, 0, sizeof(struct il_sensitivity_data));
467 460
468 data->num_in_cck_no_fa = 0; 461 data->num_in_cck_no_fa = 0;
469 data->nrg_curr_state = IWL_FA_TOO_MANY; 462 data->nrg_curr_state = IL_FA_TOO_MANY;
470 data->nrg_prev_state = IWL_FA_TOO_MANY; 463 data->nrg_prev_state = IL_FA_TOO_MANY;
471 data->nrg_silence_ref = 0; 464 data->nrg_silence_ref = 0;
472 data->nrg_silence_idx = 0; 465 data->nrg_silence_idx = 0;
473 data->nrg_energy_idx = 0; 466 data->nrg_energy_idx = 0;
@@ -478,9 +471,9 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
478 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) 471 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
479 data->nrg_silence_rssi[i] = 0; 472 data->nrg_silence_rssi[i] = 0;
480 473
481 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; 474 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
482 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; 475 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
483 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; 476 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
484 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; 477 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
485 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; 478 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
486 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; 479 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
@@ -495,11 +488,12 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
495 data->last_bad_plcp_cnt_cck = 0; 488 data->last_bad_plcp_cnt_cck = 0;
496 data->last_fa_cnt_cck = 0; 489 data->last_fa_cnt_cck = 0;
497 490
498 ret |= iwl4965_sensitivity_write(priv); 491 ret |= il4965_sensitivity_write(il);
499 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret); 492 D_CALIB("<<return 0x%X\n", ret);
500} 493}
501 494
502void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp) 495void
496il4965_sensitivity_calibration(struct il_priv *il, void *resp)
503{ 497{
504 u32 rx_enable_time; 498 u32 rx_enable_time;
505 u32 fa_cck; 499 u32 fa_cck;
@@ -508,31 +502,31 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
508 u32 bad_plcp_ofdm; 502 u32 bad_plcp_ofdm;
509 u32 norm_fa_ofdm; 503 u32 norm_fa_ofdm;
510 u32 norm_fa_cck; 504 u32 norm_fa_cck;
511 struct iwl_sensitivity_data *data = NULL; 505 struct il_sensitivity_data *data = NULL;
512 struct statistics_rx_non_phy *rx_info; 506 struct stats_rx_non_phy *rx_info;
513 struct statistics_rx_phy *ofdm, *cck; 507 struct stats_rx_phy *ofdm, *cck;
514 unsigned long flags; 508 unsigned long flags;
515 struct statistics_general_data statis; 509 struct stats_general_data statis;
516 510
517 if (priv->disable_sens_cal) 511 if (il->disable_sens_cal)
518 return; 512 return;
519 513
520 data = &(priv->sensitivity_data); 514 data = &(il->sensitivity_data);
521 515
522 if (!iwl_legacy_is_any_associated(priv)) { 516 if (!il_is_any_associated(il)) {
523 IWL_DEBUG_CALIB(priv, "<< - not associated\n"); 517 D_CALIB("<< - not associated\n");
524 return; 518 return;
525 } 519 }
526 520
527 spin_lock_irqsave(&priv->lock, flags); 521 spin_lock_irqsave(&il->lock, flags);
528 522
529 rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general); 523 rx_info = &(((struct il_notif_stats *)resp)->rx.general);
530 ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm); 524 ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm);
531 cck = &(((struct iwl_notif_statistics *)resp)->rx.cck); 525 cck = &(((struct il_notif_stats *)resp)->rx.cck);
532 526
533 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 527 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
534 IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); 528 D_CALIB("<< invalid data.\n");
535 spin_unlock_irqrestore(&priv->lock, flags); 529 spin_unlock_irqrestore(&il->lock, flags);
536 return; 530 return;
537 } 531 }
538 532
@@ -544,30 +538,27 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
544 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err); 538 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
545 539
546 statis.beacon_silence_rssi_a = 540 statis.beacon_silence_rssi_a =
547 le32_to_cpu(rx_info->beacon_silence_rssi_a); 541 le32_to_cpu(rx_info->beacon_silence_rssi_a);
548 statis.beacon_silence_rssi_b = 542 statis.beacon_silence_rssi_b =
549 le32_to_cpu(rx_info->beacon_silence_rssi_b); 543 le32_to_cpu(rx_info->beacon_silence_rssi_b);
550 statis.beacon_silence_rssi_c = 544 statis.beacon_silence_rssi_c =
551 le32_to_cpu(rx_info->beacon_silence_rssi_c); 545 le32_to_cpu(rx_info->beacon_silence_rssi_c);
552 statis.beacon_energy_a = 546 statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a);
553 le32_to_cpu(rx_info->beacon_energy_a); 547 statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b);
554 statis.beacon_energy_b = 548 statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c);
555 le32_to_cpu(rx_info->beacon_energy_b);
556 statis.beacon_energy_c =
557 le32_to_cpu(rx_info->beacon_energy_c);
558 549
559 spin_unlock_irqrestore(&priv->lock, flags); 550 spin_unlock_irqrestore(&il->lock, flags);
560 551
561 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); 552 D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
562 553
563 if (!rx_enable_time) { 554 if (!rx_enable_time) {
564 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n"); 555 D_CALIB("<< RX Enable Time == 0!\n");
565 return; 556 return;
566 } 557 }
567 558
568 /* These statistics increase monotonically, and do not reset 559 /* These stats increase monotonically, and do not reset
569 * at each beacon. Calculate difference from last value, or just 560 * at each beacon. Calculate difference from last value, or just
570 * use the new statistics value if it has reset or wrapped around. */ 561 * use the new stats value if it has reset or wrapped around. */
571 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) 562 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
572 data->last_bad_plcp_cnt_cck = bad_plcp_cck; 563 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
573 else { 564 else {
@@ -600,17 +591,17 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
600 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; 591 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
601 norm_fa_cck = fa_cck + bad_plcp_cck; 592 norm_fa_cck = fa_cck + bad_plcp_cck;
602 593
603 IWL_DEBUG_CALIB(priv, 594 D_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
604 "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, 595 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
605 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
606 596
607 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); 597 il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time);
608 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); 598 il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis);
609 599
610 iwl4965_sensitivity_write(priv); 600 il4965_sensitivity_write(il);
611} 601}
612 602
613static inline u8 iwl4965_find_first_chain(u8 mask) 603static inline u8
604il4965_find_first_chain(u8 mask)
614{ 605{
615 if (mask & ANT_A) 606 if (mask & ANT_A)
616 return CHAIN_A; 607 return CHAIN_A;
@@ -624,8 +615,8 @@ static inline u8 iwl4965_find_first_chain(u8 mask)
624 * disconnected. 615 * disconnected.
625 */ 616 */
626static void 617static void
627iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, 618il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
628 struct iwl_chain_noise_data *data) 619 struct il_chain_noise_data *data)
629{ 620{
630 u32 active_chains = 0; 621 u32 active_chains = 0;
631 u32 max_average_sig; 622 u32 max_average_sig;
@@ -634,12 +625,15 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
634 u8 first_chain; 625 u8 first_chain;
635 u16 i = 0; 626 u16 i = 0;
636 627
637 average_sig[0] = data->chain_signal_a / 628 average_sig[0] =
638 priv->cfg->base_params->chain_noise_num_beacons; 629 data->chain_signal_a /
639 average_sig[1] = data->chain_signal_b / 630 il->cfg->base_params->chain_noise_num_beacons;
640 priv->cfg->base_params->chain_noise_num_beacons; 631 average_sig[1] =
641 average_sig[2] = data->chain_signal_c / 632 data->chain_signal_b /
642 priv->cfg->base_params->chain_noise_num_beacons; 633 il->cfg->base_params->chain_noise_num_beacons;
634 average_sig[2] =
635 data->chain_signal_c /
636 il->cfg->base_params->chain_noise_num_beacons;
643 637
644 if (average_sig[0] >= average_sig[1]) { 638 if (average_sig[0] >= average_sig[1]) {
645 max_average_sig = average_sig[0]; 639 max_average_sig = average_sig[0];
@@ -657,10 +651,10 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
657 active_chains = (1 << max_average_sig_antenna_i); 651 active_chains = (1 << max_average_sig_antenna_i);
658 } 652 }
659 653
660 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n", 654 D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1],
661 average_sig[0], average_sig[1], average_sig[2]); 655 average_sig[2]);
662 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n", 656 D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig,
663 max_average_sig, max_average_sig_antenna_i); 657 max_average_sig_antenna_i);
664 658
665 /* Compare signal strengths for all 3 receivers. */ 659 /* Compare signal strengths for all 3 receivers. */
666 for (i = 0; i < NUM_RX_CHAINS; i++) { 660 for (i = 0; i < NUM_RX_CHAINS; i++) {
@@ -673,9 +667,9 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
673 data->disconn_array[i] = 1; 667 data->disconn_array[i] = 1;
674 else 668 else
675 active_chains |= (1 << i); 669 active_chains |= (1 << i);
676 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d " 670 D_CALIB("i = %d rssiDelta = %d "
677 "disconn_array[i] = %d\n", 671 "disconn_array[i] = %d\n", i, rssi_delta,
678 i, rssi_delta, data->disconn_array[i]); 672 data->disconn_array[i]);
679 } 673 }
680 } 674 }
681 675
@@ -689,119 +683,110 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
689 * To be safe, simply mask out any chains that we know 683 * To be safe, simply mask out any chains that we know
690 * are not on the device. 684 * are not on the device.
691 */ 685 */
692 active_chains &= priv->hw_params.valid_rx_ant; 686 active_chains &= il->hw_params.valid_rx_ant;
693 687
694 num_tx_chains = 0; 688 num_tx_chains = 0;
695 for (i = 0; i < NUM_RX_CHAINS; i++) { 689 for (i = 0; i < NUM_RX_CHAINS; i++) {
696 /* loops on all the bits of 690 /* loops on all the bits of
697 * priv->hw_setting.valid_tx_ant */ 691 * il->hw_setting.valid_tx_ant */
698 u8 ant_msk = (1 << i); 692 u8 ant_msk = (1 << i);
699 if (!(priv->hw_params.valid_tx_ant & ant_msk)) 693 if (!(il->hw_params.valid_tx_ant & ant_msk))
700 continue; 694 continue;
701 695
702 num_tx_chains++; 696 num_tx_chains++;
703 if (data->disconn_array[i] == 0) 697 if (data->disconn_array[i] == 0)
704 /* there is a Tx antenna connected */ 698 /* there is a Tx antenna connected */
705 break; 699 break;
706 if (num_tx_chains == priv->hw_params.tx_chains_num && 700 if (num_tx_chains == il->hw_params.tx_chains_num &&
707 data->disconn_array[i]) { 701 data->disconn_array[i]) {
708 /* 702 /*
709 * If all chains are disconnected 703 * If all chains are disconnected
710 * connect the first valid tx chain 704 * connect the first valid tx chain
711 */ 705 */
712 first_chain = 706 first_chain =
713 iwl4965_find_first_chain(priv->cfg->valid_tx_ant); 707 il4965_find_first_chain(il->cfg->valid_tx_ant);
714 data->disconn_array[first_chain] = 0; 708 data->disconn_array[first_chain] = 0;
715 active_chains |= BIT(first_chain); 709 active_chains |= BIT(first_chain);
716 IWL_DEBUG_CALIB(priv, 710 D_CALIB("All Tx chains are disconnected"
717 "All Tx chains are disconnected W/A - declare %d as connected\n", 711 "- declare %d as connected\n", first_chain);
718 first_chain);
719 break; 712 break;
720 } 713 }
721 } 714 }
722 715
723 if (active_chains != priv->hw_params.valid_rx_ant && 716 if (active_chains != il->hw_params.valid_rx_ant &&
724 active_chains != priv->chain_noise_data.active_chains) 717 active_chains != il->chain_noise_data.active_chains)
725 IWL_DEBUG_CALIB(priv, 718 D_CALIB("Detected that not all antennas are connected! "
726 "Detected that not all antennas are connected! " 719 "Connected: %#x, valid: %#x.\n", active_chains,
727 "Connected: %#x, valid: %#x.\n", 720 il->hw_params.valid_rx_ant);
728 active_chains, priv->hw_params.valid_rx_ant);
729 721
730 /* Save for use within RXON, TX, SCAN commands, etc. */ 722 /* Save for use within RXON, TX, SCAN commands, etc. */
731 data->active_chains = active_chains; 723 data->active_chains = active_chains;
732 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", 724 D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains);
733 active_chains);
734} 725}
735 726
736static void iwl4965_gain_computation(struct iwl_priv *priv, 727static void
737 u32 *average_noise, 728il4965_gain_computation(struct il_priv *il, u32 * average_noise,
738 u16 min_average_noise_antenna_i, 729 u16 min_average_noise_antenna_i, u32 min_average_noise,
739 u32 min_average_noise, 730 u8 default_chain)
740 u8 default_chain)
741{ 731{
742 int i, ret; 732 int i, ret;
743 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 733 struct il_chain_noise_data *data = &il->chain_noise_data;
744 734
745 data->delta_gain_code[min_average_noise_antenna_i] = 0; 735 data->delta_gain_code[min_average_noise_antenna_i] = 0;
746 736
747 for (i = default_chain; i < NUM_RX_CHAINS; i++) { 737 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
748 s32 delta_g = 0; 738 s32 delta_g = 0;
749 739
750 if (!(data->disconn_array[i]) && 740 if (!data->disconn_array[i] &&
751 (data->delta_gain_code[i] == 741 data->delta_gain_code[i] ==
752 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { 742 CHAIN_NOISE_DELTA_GAIN_INIT_VAL) {
753 delta_g = average_noise[i] - min_average_noise; 743 delta_g = average_noise[i] - min_average_noise;
754 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15); 744 data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15);
755 data->delta_gain_code[i] = 745 data->delta_gain_code[i] =
756 min(data->delta_gain_code[i], 746 min(data->delta_gain_code[i],
757 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); 747 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
758 748
759 data->delta_gain_code[i] = 749 data->delta_gain_code[i] =
760 (data->delta_gain_code[i] | (1 << 2)); 750 (data->delta_gain_code[i] | (1 << 2));
761 } else { 751 } else {
762 data->delta_gain_code[i] = 0; 752 data->delta_gain_code[i] = 0;
763 } 753 }
764 } 754 }
765 IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n", 755 D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0],
766 data->delta_gain_code[0], 756 data->delta_gain_code[1], data->delta_gain_code[2]);
767 data->delta_gain_code[1],
768 data->delta_gain_code[2]);
769 757
770 /* Differential gain gets sent to uCode only once */ 758 /* Differential gain gets sent to uCode only once */
771 if (!data->radio_write) { 759 if (!data->radio_write) {
772 struct iwl_calib_diff_gain_cmd cmd; 760 struct il_calib_diff_gain_cmd cmd;
773 data->radio_write = 1; 761 data->radio_write = 1;
774 762
775 memset(&cmd, 0, sizeof(cmd)); 763 memset(&cmd, 0, sizeof(cmd));
776 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; 764 cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
777 cmd.diff_gain_a = data->delta_gain_code[0]; 765 cmd.diff_gain_a = data->delta_gain_code[0];
778 cmd.diff_gain_b = data->delta_gain_code[1]; 766 cmd.diff_gain_b = data->delta_gain_code[1];
779 cmd.diff_gain_c = data->delta_gain_code[2]; 767 cmd.diff_gain_c = data->delta_gain_code[2];
780 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 768 ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd);
781 sizeof(cmd), &cmd);
782 if (ret) 769 if (ret)
783 IWL_DEBUG_CALIB(priv, "fail sending cmd " 770 D_CALIB("fail sending cmd " "C_PHY_CALIBRATION\n");
784 "REPLY_PHY_CALIBRATION_CMD\n");
785 771
786 /* TODO we might want recalculate 772 /* TODO we might want recalculate
787 * rx_chain in rxon cmd */ 773 * rx_chain in rxon cmd */
788 774
789 /* Mark so we run this algo only once! */ 775 /* Mark so we run this algo only once! */
790 data->state = IWL_CHAIN_NOISE_CALIBRATED; 776 data->state = IL_CHAIN_NOISE_CALIBRATED;
791 } 777 }
792} 778}
793 779
794
795
796/* 780/*
797 * Accumulate 16 beacons of signal and noise statistics for each of 781 * Accumulate 16 beacons of signal and noise stats for each of
798 * 3 receivers/antennas/rx-chains, then figure out: 782 * 3 receivers/antennas/rx-chains, then figure out:
799 * 1) Which antennas are connected. 783 * 1) Which antennas are connected.
800 * 2) Differential rx gain settings to balance the 3 receivers. 784 * 2) Differential rx gain settings to balance the 3 receivers.
801 */ 785 */
802void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) 786void
787il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
803{ 788{
804 struct iwl_chain_noise_data *data = NULL; 789 struct il_chain_noise_data *data = NULL;
805 790
806 u32 chain_noise_a; 791 u32 chain_noise_a;
807 u32 chain_noise_b; 792 u32 chain_noise_b;
@@ -809,8 +794,8 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
809 u32 chain_sig_a; 794 u32 chain_sig_a;
810 u32 chain_sig_b; 795 u32 chain_sig_b;
811 u32 chain_sig_c; 796 u32 chain_sig_c;
812 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; 797 u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
813 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; 798 u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
814 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; 799 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
815 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; 800 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
816 u16 i = 0; 801 u16 i = 0;
@@ -819,70 +804,69 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
819 u8 rxon_band24; 804 u8 rxon_band24;
820 u8 stat_band24; 805 u8 stat_band24;
821 unsigned long flags; 806 unsigned long flags;
822 struct statistics_rx_non_phy *rx_info; 807 struct stats_rx_non_phy *rx_info;
823 808
824 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 809 struct il_rxon_context *ctx = &il->ctx;
825 810
826 if (priv->disable_chain_noise_cal) 811 if (il->disable_chain_noise_cal)
827 return; 812 return;
828 813
829 data = &(priv->chain_noise_data); 814 data = &(il->chain_noise_data);
830 815
831 /* 816 /*
832 * Accumulate just the first "chain_noise_num_beacons" after 817 * Accumulate just the first "chain_noise_num_beacons" after
833 * the first association, then we're done forever. 818 * the first association, then we're done forever.
834 */ 819 */
835 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { 820 if (data->state != IL_CHAIN_NOISE_ACCUMULATE) {
836 if (data->state == IWL_CHAIN_NOISE_ALIVE) 821 if (data->state == IL_CHAIN_NOISE_ALIVE)
837 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); 822 D_CALIB("Wait for noise calib reset\n");
838 return; 823 return;
839 } 824 }
840 825
841 spin_lock_irqsave(&priv->lock, flags); 826 spin_lock_irqsave(&il->lock, flags);
842 827
843 rx_info = &(((struct iwl_notif_statistics *)stat_resp)-> 828 rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general);
844 rx.general);
845 829
846 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 830 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
847 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); 831 D_CALIB(" << Interference data unavailable\n");
848 spin_unlock_irqrestore(&priv->lock, flags); 832 spin_unlock_irqrestore(&il->lock, flags);
849 return; 833 return;
850 } 834 }
851 835
852 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); 836 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
853 rxon_chnum = le16_to_cpu(ctx->staging.channel); 837 rxon_chnum = le16_to_cpu(ctx->staging.channel);
854 838
855 stat_band24 = !!(((struct iwl_notif_statistics *) 839 stat_band24 =
856 stat_resp)->flag & 840 !!(((struct il_notif_stats *)stat_resp)->
857 STATISTICS_REPLY_FLG_BAND_24G_MSK); 841 flag & STATS_REPLY_FLG_BAND_24G_MSK);
858 stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *) 842 stat_chnum =
859 stat_resp)->flag) >> 16; 843 le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16;
860 844
861 /* Make sure we accumulate data for just the associated channel 845 /* Make sure we accumulate data for just the associated channel
862 * (even if scanning). */ 846 * (even if scanning). */
863 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) { 847 if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) {
864 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n", 848 D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum,
865 rxon_chnum, rxon_band24); 849 rxon_band24);
866 spin_unlock_irqrestore(&priv->lock, flags); 850 spin_unlock_irqrestore(&il->lock, flags);
867 return; 851 return;
868 } 852 }
869 853
870 /* 854 /*
871 * Accumulate beacon statistics values across 855 * Accumulate beacon stats values across
872 * "chain_noise_num_beacons" 856 * "chain_noise_num_beacons"
873 */ 857 */
874 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & 858 chain_noise_a =
875 IN_BAND_FILTER; 859 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
876 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & 860 chain_noise_b =
877 IN_BAND_FILTER; 861 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
878 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & 862 chain_noise_c =
879 IN_BAND_FILTER; 863 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
880 864
881 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; 865 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
882 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; 866 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
883 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; 867 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
884 868
885 spin_unlock_irqrestore(&priv->lock, flags); 869 spin_unlock_irqrestore(&il->lock, flags);
886 870
887 data->beacon_count++; 871 data->beacon_count++;
888 872
@@ -894,34 +878,33 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
894 data->chain_signal_b = (chain_sig_b + data->chain_signal_b); 878 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
895 data->chain_signal_c = (chain_sig_c + data->chain_signal_c); 879 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
896 880
897 IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n", 881 D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24,
898 rxon_chnum, rxon_band24, data->beacon_count); 882 data->beacon_count);
899 IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n", 883 D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b,
900 chain_sig_a, chain_sig_b, chain_sig_c); 884 chain_sig_c);
901 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n", 885 D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b,
902 chain_noise_a, chain_noise_b, chain_noise_c); 886 chain_noise_c);
903 887
904 /* If this is the "chain_noise_num_beacons", determine: 888 /* If this is the "chain_noise_num_beacons", determine:
905 * 1) Disconnected antennas (using signal strengths) 889 * 1) Disconnected antennas (using signal strengths)
906 * 2) Differential gain (using silence noise) to balance receivers */ 890 * 2) Differential gain (using silence noise) to balance receivers */
907 if (data->beacon_count != 891 if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons)
908 priv->cfg->base_params->chain_noise_num_beacons)
909 return; 892 return;
910 893
911 /* Analyze signal for disconnected antenna */ 894 /* Analyze signal for disconnected antenna */
912 iwl4965_find_disconn_antenna(priv, average_sig, data); 895 il4965_find_disconn_antenna(il, average_sig, data);
913 896
914 /* Analyze noise for rx balance */ 897 /* Analyze noise for rx balance */
915 average_noise[0] = data->chain_noise_a / 898 average_noise[0] =
916 priv->cfg->base_params->chain_noise_num_beacons; 899 data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons;
917 average_noise[1] = data->chain_noise_b / 900 average_noise[1] =
918 priv->cfg->base_params->chain_noise_num_beacons; 901 data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons;
919 average_noise[2] = data->chain_noise_c / 902 average_noise[2] =
920 priv->cfg->base_params->chain_noise_num_beacons; 903 data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons;
921 904
922 for (i = 0; i < NUM_RX_CHAINS; i++) { 905 for (i = 0; i < NUM_RX_CHAINS; i++) {
923 if (!(data->disconn_array[i]) && 906 if (!data->disconn_array[i] &&
924 (average_noise[i] <= min_average_noise)) { 907 average_noise[i] <= min_average_noise) {
925 /* This means that chain i is active and has 908 /* This means that chain i is active and has
926 * lower noise values so far: */ 909 * lower noise values so far: */
927 min_average_noise = average_noise[i]; 910 min_average_noise = average_noise[i];
@@ -929,39 +912,37 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
929 } 912 }
930 } 913 }
931 914
932 IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n", 915 D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0],
933 average_noise[0], average_noise[1], 916 average_noise[1], average_noise[2]);
934 average_noise[2]);
935 917
936 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", 918 D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise,
937 min_average_noise, min_average_noise_antenna_i); 919 min_average_noise_antenna_i);
938 920
939 iwl4965_gain_computation(priv, average_noise, 921 il4965_gain_computation(il, average_noise, min_average_noise_antenna_i,
940 min_average_noise_antenna_i, min_average_noise, 922 min_average_noise,
941 iwl4965_find_first_chain(priv->cfg->valid_rx_ant)); 923 il4965_find_first_chain(il->cfg->valid_rx_ant));
942 924
943 /* Some power changes may have been made during the calibration. 925 /* Some power changes may have been made during the calibration.
944 * Update and commit the RXON 926 * Update and commit the RXON
945 */ 927 */
946 if (priv->cfg->ops->lib->update_chain_flags) 928 if (il->cfg->ops->lib->update_chain_flags)
947 priv->cfg->ops->lib->update_chain_flags(priv); 929 il->cfg->ops->lib->update_chain_flags(il);
948 930
949 data->state = IWL_CHAIN_NOISE_DONE; 931 data->state = IL_CHAIN_NOISE_DONE;
950 iwl_legacy_power_update_mode(priv, false); 932 il_power_update_mode(il, false);
951} 933}
952 934
953void iwl4965_reset_run_time_calib(struct iwl_priv *priv) 935void
936il4965_reset_run_time_calib(struct il_priv *il)
954{ 937{
955 int i; 938 int i;
956 memset(&(priv->sensitivity_data), 0, 939 memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data));
957 sizeof(struct iwl_sensitivity_data)); 940 memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data));
958 memset(&(priv->chain_noise_data), 0,
959 sizeof(struct iwl_chain_noise_data));
960 for (i = 0; i < NUM_RX_CHAINS; i++) 941 for (i = 0; i < NUM_RX_CHAINS; i++)
961 priv->chain_noise_data.delta_gain_code[i] = 942 il->chain_noise_data.delta_gain_code[i] =
962 CHAIN_NOISE_DELTA_GAIN_INIT_VAL; 943 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
963 944
964 /* Ask for statistics now, the uCode will send notification 945 /* Ask for stats now, the uCode will send notification
965 * periodically after association */ 946 * periodically after association */
966 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true); 947 il_send_stats_request(il, CMD_ASYNC, true);
967} 948}
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
new file mode 100644
index 000000000000..98ec39f56ba3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -0,0 +1,746 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "common.h"
29#include "4965.h"
30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
33static const char *fmt_header =
34 "%-32s current cumulative delta max\n";
35
36static int
37il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
38{
39 int p = 0;
40 u32 flag;
41
42 flag = le32_to_cpu(il->_4965.stats.flag);
43
44 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
45 if (flag & UCODE_STATS_CLEAR_MSK)
46 p += scnprintf(buf + p, bufsz - p,
47 "\tStatistics have been cleared\n");
48 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
49 (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
50 "5.2 GHz");
51 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
52 (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
53 "disabled");
54
55 return p;
56}
57
58ssize_t
59il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
60 size_t count, loff_t *ppos)
61{
62 struct il_priv *il = file->private_data;
63 int pos = 0;
64 char *buf;
65 int bufsz =
66 sizeof(struct stats_rx_phy) * 40 +
67 sizeof(struct stats_rx_non_phy) * 40 +
68 sizeof(struct stats_rx_ht_phy) * 40 + 400;
69 ssize_t ret;
70 struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
71 struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
72 struct stats_rx_non_phy *general, *accum_general;
73 struct stats_rx_non_phy *delta_general, *max_general;
74 struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
75
76 if (!il_is_alive(il))
77 return -EAGAIN;
78
79 buf = kzalloc(bufsz, GFP_KERNEL);
80 if (!buf) {
81 IL_ERR("Can not allocate Buffer\n");
82 return -ENOMEM;
83 }
84
85 /*
86 * the statistic information display here is based on
87 * the last stats notification from uCode
88 * might not reflect the current uCode activity
89 */
90 ofdm = &il->_4965.stats.rx.ofdm;
91 cck = &il->_4965.stats.rx.cck;
92 general = &il->_4965.stats.rx.general;
93 ht = &il->_4965.stats.rx.ofdm_ht;
94 accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
95 accum_cck = &il->_4965.accum_stats.rx.cck;
96 accum_general = &il->_4965.accum_stats.rx.general;
97 accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
98 delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
99 delta_cck = &il->_4965.delta_stats.rx.cck;
100 delta_general = &il->_4965.delta_stats.rx.general;
101 delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
102 max_ofdm = &il->_4965.max_delta.rx.ofdm;
103 max_cck = &il->_4965.max_delta.rx.cck;
104 max_general = &il->_4965.max_delta.rx.general;
105 max_ht = &il->_4965.max_delta.rx.ofdm_ht;
106
107 pos += il4965_stats_flag(il, buf, bufsz);
108 pos +=
109 scnprintf(buf + pos, bufsz - pos, fmt_header,
110 "Statistics_Rx - OFDM:");
111 pos +=
112 scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
113 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
114 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
115 pos +=
116 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
117 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
118 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
119 pos +=
120 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
121 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
122 delta_ofdm->plcp_err, max_ofdm->plcp_err);
123 pos +=
124 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
125 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
126 delta_ofdm->crc32_err, max_ofdm->crc32_err);
127 pos +=
128 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
129 le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
130 delta_ofdm->overrun_err, max_ofdm->overrun_err);
131 pos +=
132 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
133 le32_to_cpu(ofdm->early_overrun_err),
134 accum_ofdm->early_overrun_err,
135 delta_ofdm->early_overrun_err,
136 max_ofdm->early_overrun_err);
137 pos +=
138 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
139 le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
140 delta_ofdm->crc32_good, max_ofdm->crc32_good);
141 pos +=
142 scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
143 le32_to_cpu(ofdm->false_alarm_cnt),
144 accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos +=
147 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos +=
153 scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
155 delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
156 pos +=
157 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
159 delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
160 pos +=
161 scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
162 le32_to_cpu(ofdm->unresponded_rts),
163 accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
164 max_ofdm->unresponded_rts);
165 pos +=
166 scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
167 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
168 accum_ofdm->rxe_frame_limit_overrun,
169 delta_ofdm->rxe_frame_limit_overrun,
170 max_ofdm->rxe_frame_limit_overrun);
171 pos +=
172 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
173 le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
174 delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
175 pos +=
176 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
177 le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
178 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
179 pos +=
180 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
181 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
182 accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
183 max_ofdm->sent_ba_rsp_cnt);
184 pos +=
185 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
186 le32_to_cpu(ofdm->dsp_self_kill),
187 accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
188 max_ofdm->dsp_self_kill);
189 pos +=
190 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
191 le32_to_cpu(ofdm->mh_format_err),
192 accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
193 max_ofdm->mh_format_err);
194 pos +=
195 scnprintf(buf + pos, bufsz - pos, fmt_table,
196 "re_acq_main_rssi_sum:",
197 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
198 accum_ofdm->re_acq_main_rssi_sum,
199 delta_ofdm->re_acq_main_rssi_sum,
200 max_ofdm->re_acq_main_rssi_sum);
201
202 pos +=
203 scnprintf(buf + pos, bufsz - pos, fmt_header,
204 "Statistics_Rx - CCK:");
205 pos +=
206 scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
207 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
208 delta_cck->ina_cnt, max_cck->ina_cnt);
209 pos +=
210 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
211 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
212 delta_cck->fina_cnt, max_cck->fina_cnt);
213 pos +=
214 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
215 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
216 delta_cck->plcp_err, max_cck->plcp_err);
217 pos +=
218 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
219 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
220 delta_cck->crc32_err, max_cck->crc32_err);
221 pos +=
222 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
223 le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
224 delta_cck->overrun_err, max_cck->overrun_err);
225 pos +=
226 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
227 le32_to_cpu(cck->early_overrun_err),
228 accum_cck->early_overrun_err,
229 delta_cck->early_overrun_err, max_cck->early_overrun_err);
230 pos +=
231 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
232 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
233 delta_cck->crc32_good, max_cck->crc32_good);
234 pos +=
235 scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
236 le32_to_cpu(cck->false_alarm_cnt),
237 accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
238 max_cck->false_alarm_cnt);
239 pos +=
240 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
241 le32_to_cpu(cck->fina_sync_err_cnt),
242 accum_cck->fina_sync_err_cnt,
243 delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
244 pos +=
245 scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
246 le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
247 delta_cck->sfd_timeout, max_cck->sfd_timeout);
248 pos +=
249 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
250 le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
251 delta_cck->fina_timeout, max_cck->fina_timeout);
252 pos +=
253 scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
254 le32_to_cpu(cck->unresponded_rts),
255 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
256 max_cck->unresponded_rts);
257 pos +=
258 scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
259 le32_to_cpu(cck->rxe_frame_limit_overrun),
260 accum_cck->rxe_frame_limit_overrun,
261 delta_cck->rxe_frame_limit_overrun,
262 max_cck->rxe_frame_limit_overrun);
263 pos +=
264 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
265 le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
266 delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
267 pos +=
268 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
269 le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
270 delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
271 pos +=
272 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
273 le32_to_cpu(cck->sent_ba_rsp_cnt),
274 accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
275 max_cck->sent_ba_rsp_cnt);
276 pos +=
277 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
278 le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
279 delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
280 pos +=
281 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
282 le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
283 delta_cck->mh_format_err, max_cck->mh_format_err);
284 pos +=
285 scnprintf(buf + pos, bufsz - pos, fmt_table,
286 "re_acq_main_rssi_sum:",
287 le32_to_cpu(cck->re_acq_main_rssi_sum),
288 accum_cck->re_acq_main_rssi_sum,
289 delta_cck->re_acq_main_rssi_sum,
290 max_cck->re_acq_main_rssi_sum);
291
292 pos +=
293 scnprintf(buf + pos, bufsz - pos, fmt_header,
294 "Statistics_Rx - GENERAL:");
295 pos +=
296 scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
297 le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
298 delta_general->bogus_cts, max_general->bogus_cts);
299 pos +=
300 scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
301 le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
302 delta_general->bogus_ack, max_general->bogus_ack);
303 pos +=
304 scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
305 le32_to_cpu(general->non_bssid_frames),
306 accum_general->non_bssid_frames,
307 delta_general->non_bssid_frames,
308 max_general->non_bssid_frames);
309 pos +=
310 scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
311 le32_to_cpu(general->filtered_frames),
312 accum_general->filtered_frames,
313 delta_general->filtered_frames,
314 max_general->filtered_frames);
315 pos +=
316 scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
317 le32_to_cpu(general->non_channel_beacons),
318 accum_general->non_channel_beacons,
319 delta_general->non_channel_beacons,
320 max_general->non_channel_beacons);
321 pos +=
322 scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
323 le32_to_cpu(general->channel_beacons),
324 accum_general->channel_beacons,
325 delta_general->channel_beacons,
326 max_general->channel_beacons);
327 pos +=
328 scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
329 le32_to_cpu(general->num_missed_bcon),
330 accum_general->num_missed_bcon,
331 delta_general->num_missed_bcon,
332 max_general->num_missed_bcon);
333 pos +=
334 scnprintf(buf + pos, bufsz - pos, fmt_table,
335 "adc_rx_saturation_time:",
336 le32_to_cpu(general->adc_rx_saturation_time),
337 accum_general->adc_rx_saturation_time,
338 delta_general->adc_rx_saturation_time,
339 max_general->adc_rx_saturation_time);
340 pos +=
341 scnprintf(buf + pos, bufsz - pos, fmt_table,
342 "ina_detect_search_tm:",
343 le32_to_cpu(general->ina_detection_search_time),
344 accum_general->ina_detection_search_time,
345 delta_general->ina_detection_search_time,
346 max_general->ina_detection_search_time);
347 pos +=
348 scnprintf(buf + pos, bufsz - pos, fmt_table,
349 "beacon_silence_rssi_a:",
350 le32_to_cpu(general->beacon_silence_rssi_a),
351 accum_general->beacon_silence_rssi_a,
352 delta_general->beacon_silence_rssi_a,
353 max_general->beacon_silence_rssi_a);
354 pos +=
355 scnprintf(buf + pos, bufsz - pos, fmt_table,
356 "beacon_silence_rssi_b:",
357 le32_to_cpu(general->beacon_silence_rssi_b),
358 accum_general->beacon_silence_rssi_b,
359 delta_general->beacon_silence_rssi_b,
360 max_general->beacon_silence_rssi_b);
361 pos +=
362 scnprintf(buf + pos, bufsz - pos, fmt_table,
363 "beacon_silence_rssi_c:",
364 le32_to_cpu(general->beacon_silence_rssi_c),
365 accum_general->beacon_silence_rssi_c,
366 delta_general->beacon_silence_rssi_c,
367 max_general->beacon_silence_rssi_c);
368 pos +=
369 scnprintf(buf + pos, bufsz - pos, fmt_table,
370 "interference_data_flag:",
371 le32_to_cpu(general->interference_data_flag),
372 accum_general->interference_data_flag,
373 delta_general->interference_data_flag,
374 max_general->interference_data_flag);
375 pos +=
376 scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
377 le32_to_cpu(general->channel_load),
378 accum_general->channel_load, delta_general->channel_load,
379 max_general->channel_load);
380 pos +=
381 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
382 le32_to_cpu(general->dsp_false_alarms),
383 accum_general->dsp_false_alarms,
384 delta_general->dsp_false_alarms,
385 max_general->dsp_false_alarms);
386 pos +=
387 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
388 le32_to_cpu(general->beacon_rssi_a),
389 accum_general->beacon_rssi_a,
390 delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
391 pos +=
392 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
393 le32_to_cpu(general->beacon_rssi_b),
394 accum_general->beacon_rssi_b,
395 delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
396 pos +=
397 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
398 le32_to_cpu(general->beacon_rssi_c),
399 accum_general->beacon_rssi_c,
400 delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
401 pos +=
402 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
403 le32_to_cpu(general->beacon_energy_a),
404 accum_general->beacon_energy_a,
405 delta_general->beacon_energy_a,
406 max_general->beacon_energy_a);
407 pos +=
408 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
409 le32_to_cpu(general->beacon_energy_b),
410 accum_general->beacon_energy_b,
411 delta_general->beacon_energy_b,
412 max_general->beacon_energy_b);
413 pos +=
414 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
415 le32_to_cpu(general->beacon_energy_c),
416 accum_general->beacon_energy_c,
417 delta_general->beacon_energy_c,
418 max_general->beacon_energy_c);
419
420 pos +=
421 scnprintf(buf + pos, bufsz - pos, fmt_header,
422 "Statistics_Rx - OFDM_HT:");
423 pos +=
424 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
425 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
426 delta_ht->plcp_err, max_ht->plcp_err);
427 pos +=
428 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
429 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
430 delta_ht->overrun_err, max_ht->overrun_err);
431 pos +=
432 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
433 le32_to_cpu(ht->early_overrun_err),
434 accum_ht->early_overrun_err, delta_ht->early_overrun_err,
435 max_ht->early_overrun_err);
436 pos +=
437 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
438 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
439 delta_ht->crc32_good, max_ht->crc32_good);
440 pos +=
441 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
442 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
443 delta_ht->crc32_err, max_ht->crc32_err);
444 pos +=
445 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
446 le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
447 delta_ht->mh_format_err, max_ht->mh_format_err);
448 pos +=
449 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
450 le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
451 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
452 pos +=
453 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
454 le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
455 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
456 pos +=
457 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
458 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
459 delta_ht->agg_cnt, max_ht->agg_cnt);
460 pos +=
461 scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
462 le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
463 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
464
465 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
466 kfree(buf);
467 return ret;
468}
469
/*
 * il4965_ucode_tx_stats_read - debugfs read handler for uCode Tx statistics
 *
 * Formats the most recently received Tx statistics notification (current,
 * accumulated, delta and max-delta values) into a text table and copies it
 * to user space via simple_read_from_buffer().
 *
 * Returns the number of bytes copied, -EAGAIN if the device is not alive,
 * or -ENOMEM if the format buffer cannot be allocated.
 */
ssize_t
il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0;
	char *buf;
	/* generous sizing: one table line per counter, plus headers */
	int bufsz = (sizeof(struct stats_tx) * 48) + 250;
	ssize_t ret;
	struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;

	if (!il_is_alive(il))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	/* the statistic information display here is based on
	 * the last stats notification from uCode
	 * might not reflect the current uCode activity
	 */
	tx = &il->_4965.stats.tx;
	accum_tx = &il->_4965.accum_stats.tx;
	delta_tx = &il->_4965.delta_stats.tx;
	max_tx = &il->_4965.max_delta.tx;

	pos += il4965_stats_flag(il, buf, bufsz);
	pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
		      le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
		      delta_tx->preamble_cnt, max_tx->preamble_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
		      le32_to_cpu(tx->rx_detected_cnt),
		      accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
		      max_tx->rx_detected_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
		      le32_to_cpu(tx->bt_prio_defer_cnt),
		      accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
		      max_tx->bt_prio_defer_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
		      le32_to_cpu(tx->bt_prio_kill_cnt),
		      accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
		      max_tx->bt_prio_kill_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
		      le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
		      delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
		      le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
		      delta_tx->cts_timeout, max_tx->cts_timeout);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
		      le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
		      delta_tx->ack_timeout, max_tx->ack_timeout);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
		      le32_to_cpu(tx->expected_ack_cnt),
		      accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
		      max_tx->expected_ack_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
		      le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
		      delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
		      le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
		      delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table,
		      "abort_nxt_frame_mismatch:",
		      le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
		      accum_tx->burst_abort_next_frame_mismatch_cnt,
		      delta_tx->burst_abort_next_frame_mismatch_cnt,
		      max_tx->burst_abort_next_frame_mismatch_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table,
		      "abort_missing_nxt_frame:",
		      le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
		      accum_tx->burst_abort_missing_next_frame_cnt,
		      delta_tx->burst_abort_missing_next_frame_cnt,
		      max_tx->burst_abort_missing_next_frame_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table,
		      "cts_timeout_collision:",
		      le32_to_cpu(tx->cts_timeout_collision),
		      accum_tx->cts_timeout_collision,
		      delta_tx->cts_timeout_collision,
		      max_tx->cts_timeout_collision);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table,
		      "ack_ba_timeout_collision:",
		      le32_to_cpu(tx->ack_or_ba_timeout_collision),
		      accum_tx->ack_or_ba_timeout_collision,
		      delta_tx->ack_or_ba_timeout_collision,
		      max_tx->ack_or_ba_timeout_collision);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
		      le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
		      delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table,
		      "agg ba_resched_frames:",
		      le32_to_cpu(tx->agg.ba_reschedule_frames),
		      accum_tx->agg.ba_reschedule_frames,
		      delta_tx->agg.ba_reschedule_frames,
		      max_tx->agg.ba_reschedule_frames);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table,
		      "agg scd_query_agg_frame:",
		      le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
		      accum_tx->agg.scd_query_agg_frame_cnt,
		      delta_tx->agg.scd_query_agg_frame_cnt,
		      max_tx->agg.scd_query_agg_frame_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table,
		      "agg scd_query_no_agg:",
		      le32_to_cpu(tx->agg.scd_query_no_agg),
		      accum_tx->agg.scd_query_no_agg,
		      delta_tx->agg.scd_query_no_agg,
		      max_tx->agg.scd_query_no_agg);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
		      le32_to_cpu(tx->agg.scd_query_agg),
		      accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
		      max_tx->agg.scd_query_agg);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table,
		      "agg scd_query_mismatch:",
		      le32_to_cpu(tx->agg.scd_query_mismatch),
		      accum_tx->agg.scd_query_mismatch,
		      delta_tx->agg.scd_query_mismatch,
		      max_tx->agg.scd_query_mismatch);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
		      le32_to_cpu(tx->agg.frame_not_ready),
		      accum_tx->agg.frame_not_ready,
		      delta_tx->agg.frame_not_ready,
		      max_tx->agg.frame_not_ready);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
		      le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
		      delta_tx->agg.underrun, max_tx->agg.underrun);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
		      le32_to_cpu(tx->agg.bt_prio_kill),
		      accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
		      max_tx->agg.bt_prio_kill);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
		      le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
		      accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
		      max_tx->agg.rx_ba_rsp_cnt);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
635
/*
 * il4965_ucode_general_stats_read - debugfs read handler for general stats
 *
 * Formats the "general" section of the last uCode statistics notification
 * (temperature, timestamps, debug counters, antenna-diversity counters)
 * into a text table and copies it to user space.
 *
 * Returns the number of bytes copied, -EAGAIN if the device is not alive,
 * or -ENOMEM if the format buffer cannot be allocated.
 */
ssize_t
il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0;
	char *buf;
	/* generous sizing: one table line per counter, plus headers */
	int bufsz = sizeof(struct stats_general) * 10 + 300;
	ssize_t ret;
	struct stats_general_common *general, *accum_general;
	struct stats_general_common *delta_general, *max_general;
	struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
	struct stats_div *div, *accum_div, *delta_div, *max_div;

	if (!il_is_alive(il))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	/* the statistic information display here is based on
	 * the last stats notification from uCode
	 * might not reflect the current uCode activity
	 */
	general = &il->_4965.stats.general.common;
	dbg = &il->_4965.stats.general.common.dbg;
	div = &il->_4965.stats.general.common.div;
	accum_general = &il->_4965.accum_stats.general.common;
	accum_dbg = &il->_4965.accum_stats.general.common.dbg;
	accum_div = &il->_4965.accum_stats.general.common.div;
	delta_general = &il->_4965.delta_stats.general.common;
	max_general = &il->_4965.max_delta.general.common;
	delta_dbg = &il->_4965.delta_stats.general.common.dbg;
	max_dbg = &il->_4965.max_delta.general.common.dbg;
	delta_div = &il->_4965.delta_stats.general.common.div;
	max_div = &il->_4965.max_delta.general.common.div;

	pos += il4965_stats_flag(il, buf, bufsz);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_header,
		      "Statistics_General:");
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
		      le32_to_cpu(general->temperature));
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
		      le32_to_cpu(general->ttl_timestamp));
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
		      le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
		      delta_dbg->burst_check, max_dbg->burst_check);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
		      le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
		      delta_dbg->burst_count, max_dbg->burst_count);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table,
		      "wait_for_silence_timeout_count:",
		      le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
		      accum_dbg->wait_for_silence_timeout_cnt,
		      delta_dbg->wait_for_silence_timeout_cnt,
		      max_dbg->wait_for_silence_timeout_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
		      le32_to_cpu(general->sleep_time),
		      accum_general->sleep_time, delta_general->sleep_time,
		      max_general->sleep_time);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
		      le32_to_cpu(general->slots_out), accum_general->slots_out,
		      delta_general->slots_out, max_general->slots_out);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
		      le32_to_cpu(general->slots_idle),
		      accum_general->slots_idle, delta_general->slots_idle,
		      max_general->slots_idle);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
		      le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
		      delta_div->tx_on_a, max_div->tx_on_a);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
		      le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
		      delta_div->tx_on_b, max_div->tx_on_b);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
		      le32_to_cpu(div->exec_time), accum_div->exec_time,
		      delta_div->exec_time, max_div->exec_time);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
		      le32_to_cpu(div->probe_time), accum_div->probe_time,
		      delta_div->probe_time, max_div->probe_time);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
		      le32_to_cpu(general->rx_enable_counter),
		      accum_general->rx_enable_counter,
		      delta_general->rx_enable_counter,
		      max_general->rx_enable_counter);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
		      le32_to_cpu(general->num_of_sos_states),
		      accum_general->num_of_sos_states,
		      delta_general->num_of_sos_states,
		      max_general->num_of_sos_states);
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
new file mode 100644
index 000000000000..4aaef4135564
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -0,0 +1,6536 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/mac80211.h>
48
49#include <asm/div64.h>
50
51#define DRV_NAME "iwl4965"
52
53#include "common.h"
54#include "4965.h"
55
56/******************************************************************************
57 *
58 * module boiler plate
59 *
60 ******************************************************************************/
61
62/*
63 * module name, copyright, version, etc.
64 */
65#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
66
67#ifdef CONFIG_IWLEGACY_DEBUG
68#define VD "d"
69#else
70#define VD
71#endif
72
73#define DRV_VERSION IWLWIFI_VERSION VD
74
75MODULE_DESCRIPTION(DRV_DESCRIPTION);
76MODULE_VERSION(DRV_VERSION);
77MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
78MODULE_LICENSE("GPL");
79MODULE_ALIAS("iwl4965");
80
81void
82il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
83{
84 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
85 IL_ERR("Tx flush command to flush out all frames\n");
86 if (!test_bit(S_EXIT_PENDING, &il->status))
87 queue_work(il->workqueue, &il->tx_flush);
88 }
89}
90
/*
 * Default module parameters (overridable at module load time).
 */
struct il_mod_params il4965_mod_params = {
	.amsdu_size_8K = 1,	/* use 8K Rx buffers (A-MSDU support) */
	.restart_fw = 1,	/* restart firmware after an error */
	/* the rest are 0 by default */
};
99
/*
 * il4965_rx_queue_reset - return every Rx buffer to the "used" pool
 *
 * Unmaps and frees any page still attached to a pool entry, places all
 * pool entries back on rx_used, clears the RBD queue array, and resets
 * the read/write indexes so the queue looks fully processed but not yet
 * restocked with fresh buffers.  All of this runs under rxq->lock.
 */
void
il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
132
/*
 * il4965_rx_init - program the device's Rx DMA channel
 *
 * Stops Rx DMA, points the device at the RBD circular buffer and the
 * Rx status area in DRAM, then re-enables DMA with the configured RB
 * size.  Register write order matters: DMA must be stopped before the
 * base/status addresses are changed.  Always returns 0.
 */
int
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;	/* 256 RBDs */
	/* NOTE(review): rb_timeout is 0 here although the comment below
	 * mentions "RB timeout 0x10" — confirm which is intended. */
	u32 rb_timeout = 0;

	if (il->cfg->mod_params->amsdu_size_8K)
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write idx */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	      rb_size |
	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
176
/*
 * il4965_set_pwr_vmain - select V_MAIN as the device power source
 *
 * Writes the power-source selection bits in the APMG power-control
 * register.  The commented-out fragment below is kept for reference:
 * it shows how V_AUX would be selected instead.
 */
static void
il4965_set_pwr_vmain(struct il_priv *il)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

 if (pci_pme_capable(il->pci_dev, PCI_D3cold))
 il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
 ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
}
194
/*
 * il4965_hw_nic_init - one-time/recovery hardware initialization
 *
 * Runs the APM init/config callbacks, selects V_MAIN power, then
 * allocates (or resets, if already allocated) the Rx queue and the
 * Tx/command queues.  On success sets S_INIT in il->status.
 *
 * Returns 0 on success, -ENOMEM if the Rx queue cannot be allocated,
 * or the error from il4965_txq_ctx_alloc().
 */
int
il4965_hw_nic_init(struct il_priv *il)
{
	unsigned long flags;
	struct il_rx_queue *rxq = &il->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&il->lock, flags);
	il->cfg->ops->lib->apm_ops.init(il);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&il->lock, flags);

	il4965_set_pwr_vmain(il);

	il->cfg->ops->lib->apm_ops.config(il);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = il_rx_queue_alloc(il);
		if (ret) {
			IL_ERR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		il4965_rx_queue_reset(il, rxq);

	/* Fill rx_free with mapped pages and hand them to the device */
	il4965_rx_replenish(il);

	il4965_rx_init(il, rxq);

	spin_lock_irqsave(&il->lock, flags);

	rxq->need_update = 1;
	il_rx_queue_update_write_ptr(il, rxq);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!il->txq) {
		ret = il4965_txq_ctx_alloc(il);
		if (ret)
			return ret;
	} else
		il4965_txq_ctx_reset(il);

	set_bit(S_INIT, &il->status);

	return 0;
}
248
249/**
250 * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
251 */
252static inline __le32
253il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
254{
255 return cpu_to_le32((u32) (dma_addr >> 8));
256}
257
/**
 * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' idx forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void
il4965_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] =
		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}
310
/**
 * il4965_rx_allocate - allocate pages and move buffers from rx_used to rx_free
 *
 * For each entry on rx_used, allocates and DMA-maps a receive page and
 * moves the buffer onto rx_free.  Stops when rx_used is empty or page
 * allocation fails.  @priority selects GFP_KERNEL vs GFP_ATOMIC for the
 * calling context.  Restocking the device queue is done by the callers
 * (il4965_rx_replenish / il4965_rx_replenish_now).
 *
 * NOTE: rxq->lock is dropped around the allocation and re-taken, so the
 * empty check is repeated after each page is obtained.
 */
static void
il4965_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* plenty of buffers left: allocation failure is tolerable */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("alloc_pages failed, " "order: %d\n",
				       il->hw_params.rx_page_order);

			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to alloc_pages with %s. "
				       "Only %u free buffers remaining.\n",
				       priority ==
				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
				       rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* re-check: rx_used may have drained while unlocked */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
		    pci_map_page(il->pci_dev, page, 0,
				 PAGE_SIZE << il->hw_params.rx_page_order,
				 PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
397
398void
399il4965_rx_replenish(struct il_priv *il)
400{
401 unsigned long flags;
402
403 il4965_rx_allocate(il, GFP_KERNEL);
404
405 spin_lock_irqsave(&il->lock, flags);
406 il4965_rx_queue_restock(il);
407 spin_unlock_irqrestore(&il->lock, flags);
408}
409
410void
411il4965_rx_replenish_now(struct il_priv *il)
412{
413 il4965_rx_allocate(il, GFP_ATOMIC);
414
415 il4965_rx_queue_restock(il);
416}
417
/* Assumes that the page field of the buffers in 'pool' is kept accurate.
 * If a page has been detached, the pool entry's page must be set to NULL.
 * This free routine walks the list of pool entries and, for each non-NULL
 * page, unmaps and frees it; then it releases the RBD ring and the Rx
 * status block.
 */
void
il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	/* 4 bytes per RBD (__le32 entries) */
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
444
/*
 * il4965_rxq_stop - stop the Rx DMA channel and wait for it to go idle
 *
 * Polls up to 1000 us for the channel-idle status bit.
 * NOTE(review): the il_poll_bit() result is discarded, so a timeout is
 * not reported; the function always returns 0.
 */
int
il4965_rxq_stop(struct il_priv *il)
{

	/* stop Rx DMA */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
		    FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

	return 0;
}
456
457int
458il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
459{
460 int idx = 0;
461 int band_offset = 0;
462
463 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
464 if (rate_n_flags & RATE_MCS_HT_MSK) {
465 idx = (rate_n_flags & 0xff);
466 return idx;
467 /* Legacy rate format, search for match in table */
468 } else {
469 if (band == IEEE80211_BAND_5GHZ)
470 band_offset = IL_FIRST_OFDM_RATE;
471 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
472 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
473 return idx - band_offset;
474 }
475
476 return -1;
477}
478
479static int
480il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
481{
482 /* data from PHY/DSP regarding signal strength, etc.,
483 * contents are always there, not configurable by host. */
484 struct il4965_rx_non_cfg_phy *ncphy =
485 (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
486 u32 agc =
487 (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
488 IL49_AGC_DB_POS;
489
490 u32 valid_antennae =
491 (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
492 >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
493 u8 max_rssi = 0;
494 u32 i;
495
496 /* Find max rssi among 3 possible receivers.
497 * These values are measured by the digital signal processor (DSP).
498 * They should stay fairly constant even as the signal strength varies,
499 * if the radio's automatic gain control (AGC) is working right.
500 * AGC value (see below) will provide the "interesting" info. */
501 for (i = 0; i < 3; i++)
502 if (valid_antennae & (1 << i))
503 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
504
505 D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
506 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
507 max_rssi, agc);
508
509 /* dBm = max_rssi dB - agc dB - constant.
510 * Higher AGC (higher radio gain) means lower signal. */
511 return max_rssi - agc - IL4965_RSSI_OFFSET;
512}
513
514static u32
515il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
516{
517 u32 decrypt_out = 0;
518
519 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
520 RX_RES_STATUS_STATION_FOUND)
521 decrypt_out |=
522 (RX_RES_STATUS_STATION_FOUND |
523 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
524
525 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
526
527 /* packet was not encrypted */
528 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
529 RX_RES_STATUS_SEC_TYPE_NONE)
530 return decrypt_out;
531
532 /* packet was encrypted with unknown alg */
533 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
534 RX_RES_STATUS_SEC_TYPE_ERR)
535 return decrypt_out;
536
537 /* decryption was not done in HW */
538 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
539 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
540 return decrypt_out;
541
542 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
543
544 case RX_RES_STATUS_SEC_TYPE_CCMP:
545 /* alg is CCM: check MIC only */
546 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
547 /* Bad MIC */
548 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
549 else
550 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
551
552 break;
553
554 case RX_RES_STATUS_SEC_TYPE_TKIP:
555 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
556 /* Bad TTAK */
557 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
558 break;
559 }
560 /* fall through if TTAK OK */
561 default:
562 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
563 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
564 else
565 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
566 break;
567 }
568
569 D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);
570
571 return decrypt_out;
572}
573
/*
 * il4965_pass_packet_to_mac80211 - hand a received frame up to mac80211
 *
 * Builds a small skb and attaches the frame data that lives in the
 * receive page as a paged fragment (zero-copy), then calls
 * ieee80211_rx().  On success ownership of rxb->page transfers to the
 * skb, so the field is cleared and the driver's page count decremented.
 *
 * Frames are silently dropped when the interface is closed or when HW
 * crypto reports a decryption failure (unless SW crypto is in use).
 */
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!il->cfg->mod_params->sw_crypto &&
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;

	/* 128 bytes is enough for the skb head; the payload is attached
	 * below as a page fragment rather than copied. */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	il_update_stats(il, false, fc, len);
	/* mac80211 reads the RX status from the skb control buffer. */
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
	/* Page now belongs to the skb; drop our reference bookkeeping. */
	il->alloc_rxb_page--;
	rxb->page = NULL;
}
608
609/* Called for N_RX (legacy ABG frames), or
610 * N_RX_MPDU (HT high-throughput N frames). */
611void
612il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
613{
614 struct ieee80211_hdr *header;
615 struct ieee80211_rx_status rx_status;
616 struct il_rx_pkt *pkt = rxb_addr(rxb);
617 struct il_rx_phy_res *phy_res;
618 __le32 rx_pkt_status;
619 struct il_rx_mpdu_res_start *amsdu;
620 u32 len;
621 u32 ampdu_status;
622 u32 rate_n_flags;
623
624 /**
625 * N_RX and N_RX_MPDU are handled differently.
626 * N_RX: physical layer info is in this buffer
627 * N_RX_MPDU: physical layer info was sent in separate
628 * command and cached in il->last_phy_res
629 *
630 * Here we set up local variables depending on which command is
631 * received.
632 */
633 if (pkt->hdr.cmd == N_RX) {
634 phy_res = (struct il_rx_phy_res *)pkt->u.raw;
635 header =
636 (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
637 phy_res->cfg_phy_cnt);
638
639 len = le16_to_cpu(phy_res->byte_count);
640 rx_pkt_status =
641 *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
642 phy_res->cfg_phy_cnt + len);
643 ampdu_status = le32_to_cpu(rx_pkt_status);
644 } else {
645 if (!il->_4965.last_phy_res_valid) {
646 IL_ERR("MPDU frame without cached PHY data\n");
647 return;
648 }
649 phy_res = &il->_4965.last_phy_res;
650 amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
651 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
652 len = le16_to_cpu(amsdu->byte_count);
653 rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
654 ampdu_status =
655 il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
656 }
657
658 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
659 D_DROP("dsp size out of range [0,20]: %d/n",
660 phy_res->cfg_phy_cnt);
661 return;
662 }
663
664 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
665 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
666 D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
667 return;
668 }
669
670 /* This will be used in several places later */
671 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
672
673 /* rx_status carries information about the packet to mac80211 */
674 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
675 rx_status.band =
676 (phy_res->
677 phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
678 IEEE80211_BAND_5GHZ;
679 rx_status.freq =
680 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
681 rx_status.band);
682 rx_status.rate_idx =
683 il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
684 rx_status.flag = 0;
685
686 /* TSF isn't reliable. In order to allow smooth user experience,
687 * this W/A doesn't propagate it to the mac80211 */
688 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */
689
690 il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
691
692 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
693 rx_status.signal = il4965_calc_rssi(il, phy_res);
694
695 il_dbg_log_rx_data_frame(il, len, header);
696 D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
697 (unsigned long long)rx_status.mactime);
698
699 /*
700 * "antenna number"
701 *
702 * It seems that the antenna field in the phy flags value
703 * is actually a bit field. This is undefined by radiotap,
704 * it wants an actual antenna number but I always get "7"
705 * for most legacy frames I receive indicating that the
706 * same frame was received on all three RX chains.
707 *
708 * I think this field should be removed in favor of a
709 * new 802.11n radiotap field "RX chains" that is defined
710 * as a bitmask.
711 */
712 rx_status.antenna =
713 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
714 RX_RES_PHY_FLAGS_ANTENNA_POS;
715
716 /* set the preamble flag if appropriate */
717 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
718 rx_status.flag |= RX_FLAG_SHORTPRE;
719
720 /* Set up the HT phy flags */
721 if (rate_n_flags & RATE_MCS_HT_MSK)
722 rx_status.flag |= RX_FLAG_HT;
723 if (rate_n_flags & RATE_MCS_HT40_MSK)
724 rx_status.flag |= RX_FLAG_40MHZ;
725 if (rate_n_flags & RATE_MCS_SGI_MSK)
726 rx_status.flag |= RX_FLAG_SHORT_GI;
727
728 il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
729 &rx_status);
730}
731
/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
 * This will be used later in il_hdl_rx() for N_RX_MPDU, which carries
 * no PHY info of its own.  The valid flag is set before the copy;
 * readers run in the same RX path, so no extra ordering is applied. */
void
il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	il->_4965.last_phy_res_valid = true;
	memcpy(&il->_4965.last_phy_res, pkt->u.raw,
	       sizeof(struct il_rx_phy_res));
}
742
/*
 * il4965_get_channels_for_scan - fill the scan command's channel array
 *
 * Walks the channels requested in il->scan_request, keeps those on
 * @band that are valid per the driver's channel info, and writes one
 * il_scan_channel entry per kept channel into @scan_ch.
 *
 * @is_active: whether an active (probing) scan was requested; passive
 *             channels are still scanned passively.
 * @n_probes:  number of direct-probe SSIDs; also folded into the channel
 *             type via IL_SCAN_PROBE_MASK.
 *
 * Returns the number of entries written to @scan_ch.
 */
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
			     enum ieee80211_band band, u8 is_active,
			     u8 n_probes, struct il_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* Passive dwell must always exceed active dwell. */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = il_get_channel_info(il, band, channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       channel);
			continue;
		}

		/* Regulatory-passive channels stay passive even in an
		 * active scan. */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
		       le32_to_cpu(scan_ch->type),
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
		       passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
821
822static inline u32
823il4965_ant_idx_to_flags(u8 ant_idx)
824{
825 return BIT(ant_idx) << RATE_MCS_ANT_POS;
826}
827
/*
 * il4965_request_scan - build and send the C_SCAN host command
 *
 * Assembles the full scan command (dwell times, direct-probe SSIDs,
 * TX rate, RX chain selection, probe request IEs and per-channel
 * entries) and sends it synchronously.  Must be called with il->mutex
 * held.  Returns 0 on success or a negative errno.
 */
int
il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il_scan_cmd *scan;
	struct il_rxon_context *ctx = &il->ctx;
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = il->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&il->mutex);

	ctx = il_rxon_ctx_from_vif(vif);

	/* The scan command buffer is allocated once and reused. */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	if (il_is_any_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		/* Limit off-channel time so the association survives:
		 * encode the suspend time relative to the beacon interval. */
		D_INFO("Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* quotient in the top bits, remainder in uSec (x1024) */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time =
		    (extra | ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Pick the probe TX rate for the band being scanned. */
	switch (il->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod =
		    le32_to_cpu(il->ctx.active.
				flags & RXON_FLG_CHANNEL_MODE_MSK) >>
		    RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = RATE_6M_PLCP;
		} else {
			rate = RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		rate = RATE_6M_PLCP;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
	 * here instead of IL_GOOD_CRC_TH_DISABLED.
	 */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;

	band = il->scan_band;

	if (il->cfg->scan_rx_antennas[band])
		rx_ant = il->cfg->scan_rx_antennas[band];

	/* Rotate the TX antenna between scans to even out wear/coverage. */
	il->scan_tx_ant[band] =
	    il4965_toggle_tx_ant(il, il->scan_tx_ant[band], scan_tx_antennas);
	rate_flags |= il4965_ant_idx_to_flags(il->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags =
	    il4965_hw_set_rate_n_flags(rate, rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains =
		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		D_SCAN("chain_noise_data.active_chains: %u\n",
		       il->chain_noise_data.active_chains);

		rx_ant = il4965_first_antenna(active_chains);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	/* Probe request template goes into the variable-length data area. */
	cmd_len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |=
	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);

	/* Channel entries follow the probe request in the data area. */
	scan->channel_count =
	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
					 (void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);

	return ret;
}
1022
1023int
1024il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
1025 bool add)
1026{
1027 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
1028
1029 if (add)
1030 return il4965_add_bssid_station(il, vif_priv->ctx,
1031 vif->bss_conf.bssid,
1032 &vif_priv->ibss_bssid_sta_id);
1033 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
1034 vif->bss_conf.bssid);
1035}
1036
1037void
1038il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
1039{
1040 lockdep_assert_held(&il->sta_lock);
1041
1042 if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1043 il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1044 else {
1045 D_TX("free more than tfds_in_queue (%u:%d)\n",
1046 il->stations[sta_id].tid[tid].tfds_in_queue, freed);
1047 il->stations[sta_id].tid[tid].tfds_in_queue = 0;
1048 }
1049}
1050
1051#define IL_TX_QUEUE_MSK 0xfffff
1052
1053static bool
1054il4965_is_single_rx_stream(struct il_priv *il)
1055{
1056 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1057 il->current_ht_config.single_chain_sufficient;
1058}
1059
1060#define IL_NUM_RX_CHAINS_MULTIPLE 3
1061#define IL_NUM_RX_CHAINS_SINGLE 2
1062#define IL_NUM_IDLE_CHAINS_DUAL 2
1063#define IL_NUM_IDLE_CHAINS_SINGLE 1
1064
1065/*
1066 * Determine how many receiver/antenna chains to use.
1067 *
1068 * More provides better reception via diversity. Fewer saves power
1069 * at the expense of throughput, but only when not in powersave to
1070 * start with.
1071 *
1072 * MIMO (dual stream) requires at least 2, but works better with 3.
1073 * This does not determine *which* chains to use, just how many.
1074 */
1075static int
1076il4965_get_active_rx_chain_count(struct il_priv *il)
1077{
1078 /* # of Rx chains to use when expecting MIMO. */
1079 if (il4965_is_single_rx_stream(il))
1080 return IL_NUM_RX_CHAINS_SINGLE;
1081 else
1082 return IL_NUM_RX_CHAINS_MULTIPLE;
1083}
1084
1085/*
1086 * When we are in power saving mode, unless device support spatial
1087 * multiplexing power save, use the active count for rx chain count.
1088 */
1089static int
1090il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
1091{
1092 /* # Rx chains when idling, depending on SMPS mode */
1093 switch (il->current_ht_config.smps) {
1094 case IEEE80211_SMPS_STATIC:
1095 case IEEE80211_SMPS_DYNAMIC:
1096 return IL_NUM_IDLE_CHAINS_SINGLE;
1097 case IEEE80211_SMPS_OFF:
1098 return active_cnt;
1099 default:
1100 WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
1101 return active_cnt;
1102 }
1103}
1104
1105/* up to 4 chains */
1106static u8
1107il4965_count_chain_bitmap(u32 chain_bitmap)
1108{
1109 u8 res;
1110 res = (chain_bitmap & BIT(0)) >> 0;
1111 res += (chain_bitmap & BIT(1)) >> 1;
1112 res += (chain_bitmap & BIT(2)) >> 2;
1113 res += (chain_bitmap & BIT(3)) >> 3;
1114 return res;
1115}
1116
/**
 * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 */
void
il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
{
	bool is_single = il4965_is_single_rx_stream(il);
	bool is_cam = !test_bit(S_POWER_PMI, &il->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, il4965_chain_noise_calibration()
	 * checks which antennas actually *are* connected. */
	if (il->chain_noise_data.active_chains)
		active_chains = il->chain_noise_data.active_chains;
	else
		active_chains = il->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = il4965_get_active_rx_chain_count(il);
	idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);

	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	/* Pack the counts into their RXON bit positions. */
	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	/* Force MIMO only when multi-stream RX is wanted, enough chains
	 * are available, and we are not in power-save (CAM = continuously
	 * aware mode). */
	if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain,
		active_rx_cnt, idle_rx_cnt);

	/* Sanity: counts must be nonzero and idle never exceeds active. */
	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
1173
1174u8
1175il4965_toggle_tx_ant(struct il_priv *il, u8 ant, u8 valid)
1176{
1177 int i;
1178 u8 ind = ant;
1179
1180 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1181 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1182 if (valid & BIT(ind))
1183 return ind;
1184 }
1185 return ant;
1186}
1187
/*
 * il4965_get_fh_string - map an FH register address to its printable name
 *
 * IL_CMD expands each entry to "case REG: return #REG;".  Used by
 * il4965_dump_fh() when dumping flow-handler register state.
 */
static const char *
il4965_get_fh_string(int cmd)
{
	switch (cmd) {
		IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
		IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
		IL_CMD(FH49_RSCSR_CHNL0_WPTR);
		IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
		IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
		IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
		IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
		IL_CMD(FH49_TSSR_TX_STATUS_REG);
		IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1205
/*
 * il4965_dump_fh - dump flow-handler register values
 *
 * With @display true (and debugging compiled in), a buffer is allocated
 * into *@buf, filled with the formatted dump, and its length returned;
 * the CALLER owns and must kfree() *buf.  Otherwise the dump goes to the
 * kernel log and 0 is returned.  Returns -ENOMEM if allocation fails.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* 48 bytes per register line plus a header line. */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      "  %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR("  %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
1250
/*
 * il4965_hdl_missed_beacon - handle a missed-beacon notification
 *
 * If the consecutive missed-beacon count exceeds the configured
 * threshold, re-initialize the RX sensitivity calibration (skipped
 * while a scan is in progress, since off-channel time naturally
 * misses beacons).
 */
void
il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    il->missed_beacon_threshold) {
		D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
			le32_to_cpu(missed_beacon->total_missed_becons),
			le32_to_cpu(missed_beacon->num_recvd_beacons),
			le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(S_SCANNING, &il->status))
			il4965_init_sensitivity(il);
	}
}
1269
1270/* Calculate noise level, based on measurements during network silence just
1271 * before arriving beacon. This measurement can be done only if we know
1272 * exactly when to expect beacons, therefore only when we're associated. */
1273static void
1274il4965_rx_calc_noise(struct il_priv *il)
1275{
1276 struct stats_rx_non_phy *rx_info;
1277 int num_active_rx = 0;
1278 int total_silence = 0;
1279 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
1280 int last_rx_noise;
1281
1282 rx_info = &(il->_4965.stats.rx.general);
1283 bcn_silence_a =
1284 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
1285 bcn_silence_b =
1286 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
1287 bcn_silence_c =
1288 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
1289
1290 if (bcn_silence_a) {
1291 total_silence += bcn_silence_a;
1292 num_active_rx++;
1293 }
1294 if (bcn_silence_b) {
1295 total_silence += bcn_silence_b;
1296 num_active_rx++;
1297 }
1298 if (bcn_silence_c) {
1299 total_silence += bcn_silence_c;
1300 num_active_rx++;
1301 }
1302
1303 /* Average among active antennas */
1304 if (num_active_rx)
1305 last_rx_noise = (total_silence / num_active_rx) - 107;
1306 else
1307 last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
1308
1309 D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
1310 bcn_silence_b, bcn_silence_c, last_rx_noise);
1311}
1312
1313#ifdef CONFIG_IWLEGACY_DEBUGFS
1314/*
1315 * based on the assumption of all stats counter are in DWORD
1316 * FIXME: This function is for debugging, do not deal with
1317 * the case of counters roll-over.
1318 */
1319static void
1320il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
1321{
1322 int i, size;
1323 __le32 *prev_stats;
1324 u32 *accum_stats;
1325 u32 *delta, *max_delta;
1326 struct stats_general_common *general, *accum_general;
1327 struct stats_tx *tx, *accum_tx;
1328
1329 prev_stats = (__le32 *) &il->_4965.stats;
1330 accum_stats = (u32 *) &il->_4965.accum_stats;
1331 size = sizeof(struct il_notif_stats);
1332 general = &il->_4965.stats.general.common;
1333 accum_general = &il->_4965.accum_stats.general.common;
1334 tx = &il->_4965.stats.tx;
1335 accum_tx = &il->_4965.accum_stats.tx;
1336 delta = (u32 *) &il->_4965.delta_stats;
1337 max_delta = (u32 *) &il->_4965.max_delta;
1338
1339 for (i = sizeof(__le32); i < size;
1340 i +=
1341 sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
1342 accum_stats++) {
1343 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
1344 *delta =
1345 (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
1346 *accum_stats += *delta;
1347 if (*delta > *max_delta)
1348 *max_delta = *delta;
1349 }
1350 }
1351
1352 /* reset accumulative stats for "no-counter" type stats */
1353 accum_general->temperature = general->temperature;
1354 accum_general->ttl_timestamp = general->ttl_timestamp;
1355}
1356#endif
1357
1358#define REG_RECALIB_PERIOD (60)
1359
/*
 * il4965_hdl_stats - handle a periodic statistics notification
 *
 * Stores the new uCode statistics block, re-arms the recalibration
 * timer, and kicks off noise calculation / run-time calibration work
 * when appropriate.  A temperature or HT40-mode change triggers the
 * device's temperature callback.
 */
void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	int change;
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);

	/* Detect a temperature or HT40-mode change before overwriting
	 * the stored stats below. */
	change =
	    ((il->_4965.stats.general.common.temperature !=
	      pkt->u.stats.general.common.temperature) ||
	     ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
#endif

	/* TODO: reading some of stats is unneeded */
	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));

	set_bit(S_STATS, &il->status);

	/* Reschedule the stats timer to occur in
	 * REG_RECALIB_PERIOD seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&il->stats_periodic,
		  jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
	    (pkt->hdr.cmd == N_STATS)) {
		il4965_rx_calc_noise(il);
		queue_work(il->workqueue, &il->run_time_calib_work);
	}
	if (il->cfg->ops->lib->temp_ops.temperature && change)
		il->cfg->ops->lib->temp_ops.temperature(il);
}
1399
/*
 * il4965_hdl_c_stats - handle a statistics notification that may carry
 * the uCode's "stats cleared" flag
 *
 * When the uCode reports it has cleared its counters, the driver's
 * debugfs accumulators are reset to match, then normal stats handling
 * proceeds via il4965_hdl_stats().
 */
void
il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
#ifdef CONFIG_IWLEGACY_DEBUGFS
		memset(&il->_4965.accum_stats, 0,
		       sizeof(struct il_notif_stats));
		memset(&il->_4965.delta_stats, 0,
		       sizeof(struct il_notif_stats));
		memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
#endif
		D_RX("Statistics have been cleared\n");
	}
	il4965_hdl_stats(il, rxb);
}
1417
1418
1419/*
1420 * mac80211 queues, ACs, hardware queues, FIFOs.
1421 *
1422 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
1423 *
1424 * Mac80211 uses the following numbers, which we get as from it
1425 * by way of skb_get_queue_mapping(skb):
1426 *
1427 * VO 0
1428 * VI 1
1429 * BE 2
1430 * BK 3
1431 *
1432 *
1433 * Regular (not A-MPDU) frames are put into hardware queues corresponding
1434 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
1435 * own queue per aggregation session (RA/TID combination), such queues are
1436 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
1437 * order to map frames to the right queue, we also need an AC->hw queue
1438 * mapping. This is implemented here.
1439 *
1440 * Due to the way hw queues are set up (by the hw specific modules like
1441 * 4965.c), the AC->hw queue mapping is the identity
1442 * mapping.
1443 */
1444
/* Map each TID (0-7) to the mac80211 access category used for its
 * traffic; see the queue-mapping comment above. */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,	/* TID 0 */
	IEEE80211_AC_BK,	/* TID 1 */
	IEEE80211_AC_BK,	/* TID 2 */
	IEEE80211_AC_BE,	/* TID 3 */
	IEEE80211_AC_VI,	/* TID 4 */
	IEEE80211_AC_VI,	/* TID 5 */
	IEEE80211_AC_VO,	/* TID 6 */
	IEEE80211_AC_VO		/* TID 7 */
};
1455
1456static inline int
1457il4965_get_ac_from_tid(u16 tid)
1458{
1459 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1460 return tid_to_ac[tid];
1461
1462 /* no support for TIDs 8-15 yet */
1463 return -EINVAL;
1464}
1465
1466static inline int
1467il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
1468{
1469 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1470 return ctx->ac_to_fifo[tid_to_ac[tid]];
1471
1472 /* no support for TIDs 8-15 yet */
1473 return -EINVAL;
1474}
1475
/*
 * il4965_tx_cmd_build_basic - fill the basic fields of a C_TX command
 *
 * Derives the TX flags, TID, station id and PM frame timeout from the
 * frame's 802.11 header and mac80211 TX info.
 */
static void
il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
			  struct il_tx_cmd *tx_cmd,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_hdr *hdr, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* First fragment of a probe response carries the TSF. */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	/* BlockAck requests need an immediate BA response. */
	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	/* QoS data frames carry their TID; sequence control is then
	 * handled by the driver/uCode per-TID, not per-command. */
	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	il_tx_cmd_protection(il, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	/* PM frame timeout: (re)assoc requests get a slightly longer
	 * allowance than other management frames; data frames none. */
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
1532
1533#define RTS_DFAULT_RETRY_LIMIT 60
1534
1535static void
1536il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
1537 struct ieee80211_tx_info *info, __le16 fc)
1538{
1539 u32 rate_flags;
1540 int rate_idx;
1541 u8 rts_retry_limit;
1542 u8 data_retry_limit;
1543 u8 rate_plcp;
1544
1545 /* Set retry limit on DATA packets and Probe Responses */
1546 if (ieee80211_is_probe_resp(fc))
1547 data_retry_limit = 3;
1548 else
1549 data_retry_limit = IL4965_DEFAULT_TX_RETRY;
1550 tx_cmd->data_retry_limit = data_retry_limit;
1551
1552 /* Set retry limit on RTS packets */
1553 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
1554 if (data_retry_limit < rts_retry_limit)
1555 rts_retry_limit = data_retry_limit;
1556 tx_cmd->rts_retry_limit = rts_retry_limit;
1557
1558 /* DATA packets will use the uCode station table for rate/antenna
1559 * selection */
1560 if (ieee80211_is_data(fc)) {
1561 tx_cmd->initial_rate_idx = 0;
1562 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
1563 return;
1564 }
1565
1566 /**
1567 * If the current TX rate stored in mac80211 has the MCS bit set, it's
1568 * not really a TX rate. Thus, we use the lowest supported rate for
1569 * this band. Also use the lowest supported rate if the stored rate
1570 * idx is invalid.
1571 */
1572 rate_idx = info->control.rates[0].idx;
1573 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
1574 || rate_idx > RATE_COUNT_LEGACY)
1575 rate_idx =
1576 rate_lowest_index(&il->bands[info->band],
1577 info->control.sta);
1578 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
1579 if (info->band == IEEE80211_BAND_5GHZ)
1580 rate_idx += IL_FIRST_OFDM_RATE;
1581 /* Get PLCP rate for tx_cmd->rate_n_flags */
1582 rate_plcp = il_rates[rate_idx].plcp;
1583 /* Zero out flags for this packet */
1584 rate_flags = 0;
1585
1586 /* Set CCK flag as needed */
1587 if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
1588 rate_flags |= RATE_MCS_CCK_MSK;
1589
1590 /* Set up antennas */
1591 il->mgmt_tx_ant =
1592 il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
1593 il->hw_params.valid_tx_ant);
1594
1595 rate_flags |= il4965_ant_idx_to_flags(il->mgmt_tx_ant);
1596
1597 /* Set the rate in the TX cmd */
1598 tx_cmd->rate_n_flags =
1599 il4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
1600}
1601
1602static void
1603il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
1604 struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
1605 int sta_id)
1606{
1607 struct ieee80211_key_conf *keyconf = info->control.hw_key;
1608
1609 switch (keyconf->cipher) {
1610 case WLAN_CIPHER_SUITE_CCMP:
1611 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
1612 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
1613 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1614 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
1615 D_TX("tx_cmd with AES hwcrypto\n");
1616 break;
1617
1618 case WLAN_CIPHER_SUITE_TKIP:
1619 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
1620 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
1621 D_TX("tx_cmd with tkip hwcrypto\n");
1622 break;
1623
1624 case WLAN_CIPHER_SUITE_WEP104:
1625 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
1626 /* fall through */
1627 case WLAN_CIPHER_SUITE_WEP40:
1628 tx_cmd->sec_ctl |=
1629 (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
1630 TX_CMD_SEC_SHIFT);
1631
1632 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
1633
1634 D_TX("Configuring packet for WEP encryption " "with key %d\n",
1635 keyconf->keyidx);
1636 break;
1637
1638 default:
1639 IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
1640 break;
1641 }
1642}
1643
1644/*
1645 * start C_TX command process
1646 */
1647int
1648il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
1649{
1650 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1651 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1652 struct ieee80211_sta *sta = info->control.sta;
1653 struct il_station_priv *sta_priv = NULL;
1654 struct il_tx_queue *txq;
1655 struct il_queue *q;
1656 struct il_device_cmd *out_cmd;
1657 struct il_cmd_meta *out_meta;
1658 struct il_tx_cmd *tx_cmd;
1659 struct il_rxon_context *ctx = &il->ctx;
1660 int txq_id;
1661 dma_addr_t phys_addr;
1662 dma_addr_t txcmd_phys;
1663 dma_addr_t scratch_phys;
1664 u16 len, firstlen, secondlen;
1665 u16 seq_number = 0;
1666 __le16 fc;
1667 u8 hdr_len;
1668 u8 sta_id;
1669 u8 wait_write_ptr = 0;
1670 u8 tid = 0;
1671 u8 *qc = NULL;
1672 unsigned long flags;
1673 bool is_agg = false;
1674
1675 if (info->control.vif)
1676 ctx = il_rxon_ctx_from_vif(info->control.vif);
1677
1678 spin_lock_irqsave(&il->lock, flags);
1679 if (il_is_rfkill(il)) {
1680 D_DROP("Dropping - RF KILL\n");
1681 goto drop_unlock;
1682 }
1683
1684 fc = hdr->frame_control;
1685
1686#ifdef CONFIG_IWLEGACY_DEBUG
1687 if (ieee80211_is_auth(fc))
1688 D_TX("Sending AUTH frame\n");
1689 else if (ieee80211_is_assoc_req(fc))
1690 D_TX("Sending ASSOC frame\n");
1691 else if (ieee80211_is_reassoc_req(fc))
1692 D_TX("Sending REASSOC frame\n");
1693#endif
1694
1695 hdr_len = ieee80211_hdrlen(fc);
1696
1697 /* For management frames use broadcast id to do not break aggregation */
1698 if (!ieee80211_is_data(fc))
1699 sta_id = ctx->bcast_sta_id;
1700 else {
1701 /* Find idx into station table for destination station */
1702 sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
1703
1704 if (sta_id == IL_INVALID_STATION) {
1705 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
1706 goto drop_unlock;
1707 }
1708 }
1709
1710 D_TX("station Id %d\n", sta_id);
1711
1712 if (sta)
1713 sta_priv = (void *)sta->drv_priv;
1714
1715 if (sta_priv && sta_priv->asleep &&
1716 (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
1717 /*
1718 * This sends an asynchronous command to the device,
1719 * but we can rely on it being processed before the
1720 * next frame is processed -- and the next frame to
1721 * this station is the one that will consume this
1722 * counter.
1723 * For now set the counter to just 1 since we do not
1724 * support uAPSD yet.
1725 */
1726 il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
1727 }
1728
1729 /*
1730 * Send this frame after DTIM -- there's a special queue
1731 * reserved for this for contexts that support AP mode.
1732 */
1733 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1734 txq_id = ctx->mcast_queue;
1735 /*
1736 * The microcode will clear the more data
1737 * bit in the last frame it transmits.
1738 */
1739 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1740 } else
1741 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
1742
1743 /* irqs already disabled/saved above when locking il->lock */
1744 spin_lock(&il->sta_lock);
1745
1746 if (ieee80211_is_data_qos(fc)) {
1747 qc = ieee80211_get_qos_ctl(hdr);
1748 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1749 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
1750 spin_unlock(&il->sta_lock);
1751 goto drop_unlock;
1752 }
1753 seq_number = il->stations[sta_id].tid[tid].seq_number;
1754 seq_number &= IEEE80211_SCTL_SEQ;
1755 hdr->seq_ctrl =
1756 hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
1757 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1758 seq_number += 0x10;
1759 /* aggregation is on for this <sta,tid> */
1760 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1761 il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
1762 txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
1763 is_agg = true;
1764 }
1765 }
1766
1767 txq = &il->txq[txq_id];
1768 q = &txq->q;
1769
1770 if (unlikely(il_queue_space(q) < q->high_mark)) {
1771 spin_unlock(&il->sta_lock);
1772 goto drop_unlock;
1773 }
1774
1775 if (ieee80211_is_data_qos(fc)) {
1776 il->stations[sta_id].tid[tid].tfds_in_queue++;
1777 if (!ieee80211_has_morefrags(fc))
1778 il->stations[sta_id].tid[tid].seq_number = seq_number;
1779 }
1780
1781 spin_unlock(&il->sta_lock);
1782
1783 /* Set up driver data for this TFD */
1784 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
1785 txq->txb[q->write_ptr].skb = skb;
1786 txq->txb[q->write_ptr].ctx = ctx;
1787
1788 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1789 out_cmd = txq->cmd[q->write_ptr];
1790 out_meta = &txq->meta[q->write_ptr];
1791 tx_cmd = &out_cmd->cmd.tx;
1792 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
1793 memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
1794
1795 /*
1796 * Set up the Tx-command (not MAC!) header.
1797 * Store the chosen Tx queue and TFD idx within the sequence field;
1798 * after Tx, uCode's Tx response will return this value so driver can
1799 * locate the frame within the tx queue and do post-tx processing.
1800 */
1801 out_cmd->hdr.cmd = C_TX;
1802 out_cmd->hdr.sequence =
1803 cpu_to_le16((u16)
1804 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
1805
1806 /* Copy MAC header from skb into command buffer */
1807 memcpy(tx_cmd->hdr, hdr, hdr_len);
1808
1809 /* Total # bytes to be transmitted */
1810 len = (u16) skb->len;
1811 tx_cmd->len = cpu_to_le16(len);
1812
1813 if (info->control.hw_key)
1814 il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
1815
1816 /* TODO need this for burst mode later on */
1817 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
1818 il_dbg_log_tx_data_frame(il, len, hdr);
1819
1820 il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
1821
1822 il_update_stats(il, true, fc, len);
1823 /*
1824 * Use the first empty entry in this queue's command buffer array
1825 * to contain the Tx command and MAC header concatenated together
1826 * (payload data will be in another buffer).
1827 * Size of this varies, due to varying MAC header length.
1828 * If end is not dword aligned, we'll have 2 extra bytes at the end
1829 * of the MAC header (device reads on dword boundaries).
1830 * We'll tell device about this padding later.
1831 */
1832 len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
1833 firstlen = (len + 3) & ~3;
1834
1835 /* Tell NIC about any 2-byte padding after MAC header */
1836 if (firstlen != len)
1837 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1838
1839 /* Physical address of this Tx command's header (not MAC header!),
1840 * within command buffer array. */
1841 txcmd_phys =
1842 pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
1843 PCI_DMA_BIDIRECTIONAL);
1844 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1845 dma_unmap_len_set(out_meta, len, firstlen);
1846 /* Add buffer containing Tx command and MAC(!) header to TFD's
1847 * first entry */
1848 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen,
1849 1, 0);
1850
1851 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1852 txq->need_update = 1;
1853 } else {
1854 wait_write_ptr = 1;
1855 txq->need_update = 0;
1856 }
1857
1858 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1859 * if any (802.11 null frames have no payload). */
1860 secondlen = skb->len - hdr_len;
1861 if (secondlen > 0) {
1862 phys_addr =
1863 pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
1864 PCI_DMA_TODEVICE);
1865 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
1866 secondlen, 0, 0);
1867 }
1868
1869 scratch_phys =
1870 txcmd_phys + sizeof(struct il_cmd_header) +
1871 offsetof(struct il_tx_cmd, scratch);
1872
1873 /* take back ownership of DMA buffer to enable update */
1874 pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
1875 PCI_DMA_BIDIRECTIONAL);
1876 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1877 tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
1878
1879 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
1880 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1881 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
1882 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
1883
1884 /* Set up entry for this TFD in Tx byte-count array */
1885 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1886 il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
1887 le16_to_cpu(tx_cmd->
1888 len));
1889
1890 pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
1891 PCI_DMA_BIDIRECTIONAL);
1892
1893 /* Tell device the write idx *just past* this latest filled TFD */
1894 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
1895 il_txq_update_write_ptr(il, txq);
1896 spin_unlock_irqrestore(&il->lock, flags);
1897
1898 /*
1899 * At this point the frame is "transmitted" successfully
1900 * and we will get a TX status notification eventually,
1901 * regardless of the value of ret. "ret" only indicates
1902 * whether or not we should update the write pointer.
1903 */
1904
1905 /*
1906 * Avoid atomic ops if it isn't an associated client.
1907 * Also, if this is a packet for aggregation, don't
1908 * increase the counter because the ucode will stop
1909 * aggregation queues when their respective station
1910 * goes to sleep.
1911 */
1912 if (sta_priv && sta_priv->client && !is_agg)
1913 atomic_inc(&sta_priv->pending_frames);
1914
1915 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
1916 if (wait_write_ptr) {
1917 spin_lock_irqsave(&il->lock, flags);
1918 txq->need_update = 1;
1919 il_txq_update_write_ptr(il, txq);
1920 spin_unlock_irqrestore(&il->lock, flags);
1921 } else {
1922 il_stop_queue(il, txq);
1923 }
1924 }
1925
1926 return 0;
1927
1928drop_unlock:
1929 spin_unlock_irqrestore(&il->lock, flags);
1930 return -1;
1931}
1932
1933static inline int
1934il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
1935{
1936 ptr->addr =
1937 dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
1938 if (!ptr->addr)
1939 return -ENOMEM;
1940 ptr->size = size;
1941 return 0;
1942}
1943
1944static inline void
1945il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
1946{
1947 if (unlikely(!ptr->addr))
1948 return;
1949
1950 dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1951 memset(ptr, 0, sizeof(*ptr));
1952}
1953
1954/**
1955 * il4965_hw_txq_ctx_free - Free TXQ Context
1956 *
1957 * Destroy all TX DMA queues and structures
1958 */
1959void
1960il4965_hw_txq_ctx_free(struct il_priv *il)
1961{
1962 int txq_id;
1963
1964 /* Tx queues */
1965 if (il->txq) {
1966 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1967 if (txq_id == il->cmd_queue)
1968 il_cmd_queue_free(il);
1969 else
1970 il_tx_queue_free(il, txq_id);
1971 }
1972 il4965_free_dma_ptr(il, &il->kw);
1973
1974 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
1975
1976 /* free tx queue structure */
1977 il_txq_mem(il);
1978}
1979
1980/**
1981 * il4965_txq_ctx_alloc - allocate TX queue context
1982 * Allocate all Tx DMA structures and initialize them
1983 *
1984 * @param il
1985 * @return error code
1986 */
1987int
1988il4965_txq_ctx_alloc(struct il_priv *il)
1989{
1990 int ret;
1991 int txq_id, slots_num;
1992 unsigned long flags;
1993
1994 /* Free all tx/cmd queues and keep-warm buffer */
1995 il4965_hw_txq_ctx_free(il);
1996
1997 ret =
1998 il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
1999 il->hw_params.scd_bc_tbls_size);
2000 if (ret) {
2001 IL_ERR("Scheduler BC Table allocation failed\n");
2002 goto error_bc_tbls;
2003 }
2004 /* Alloc keep-warm buffer */
2005 ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
2006 if (ret) {
2007 IL_ERR("Keep Warm allocation failed\n");
2008 goto error_kw;
2009 }
2010
2011 /* allocate tx queue structure */
2012 ret = il_alloc_txq_mem(il);
2013 if (ret)
2014 goto error;
2015
2016 spin_lock_irqsave(&il->lock, flags);
2017
2018 /* Turn off all Tx DMA fifos */
2019 il4965_txq_set_sched(il, 0);
2020
2021 /* Tell NIC where to find the "keep warm" buffer */
2022 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2023
2024 spin_unlock_irqrestore(&il->lock, flags);
2025
2026 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
2027 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
2028 slots_num =
2029 (txq_id ==
2030 il->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
2031 ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
2032 if (ret) {
2033 IL_ERR("Tx %d queue init failed\n", txq_id);
2034 goto error;
2035 }
2036 }
2037
2038 return ret;
2039
2040error:
2041 il4965_hw_txq_ctx_free(il);
2042 il4965_free_dma_ptr(il, &il->kw);
2043error_kw:
2044 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
2045error_bc_tbls:
2046 return ret;
2047}
2048
2049void
2050il4965_txq_ctx_reset(struct il_priv *il)
2051{
2052 int txq_id, slots_num;
2053 unsigned long flags;
2054
2055 spin_lock_irqsave(&il->lock, flags);
2056
2057 /* Turn off all Tx DMA fifos */
2058 il4965_txq_set_sched(il, 0);
2059
2060 /* Tell NIC where to find the "keep warm" buffer */
2061 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2062
2063 spin_unlock_irqrestore(&il->lock, flags);
2064
2065 /* Alloc and init all Tx queues, including the command queue (#4) */
2066 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
2067 slots_num =
2068 txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
2069 il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id);
2070 }
2071}
2072
2073/**
2074 * il4965_txq_ctx_stop - Stop all Tx DMA channels
2075 */
2076void
2077il4965_txq_ctx_stop(struct il_priv *il)
2078{
2079 int ch, txq_id;
2080 unsigned long flags;
2081
2082 /* Turn off all Tx DMA fifos */
2083 spin_lock_irqsave(&il->lock, flags);
2084
2085 il4965_txq_set_sched(il, 0);
2086
2087 /* Stop each Tx DMA channel, and wait for it to be idle */
2088 for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
2089 il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
2090 if (il_poll_bit
2091 (il, FH49_TSSR_TX_STATUS_REG,
2092 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000))
2093 IL_ERR("Failing on timeout while stopping"
2094 " DMA channel %d [0x%08x]", ch,
2095 il_rd(il, FH49_TSSR_TX_STATUS_REG));
2096 }
2097 spin_unlock_irqrestore(&il->lock, flags);
2098
2099 if (!il->txq)
2100 return;
2101
2102 /* Unmap DMA from host system and free skb's */
2103 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2104 if (txq_id == il->cmd_queue)
2105 il_cmd_queue_unmap(il);
2106 else
2107 il_tx_queue_unmap(il, txq_id);
2108}
2109
2110/*
2111 * Find first available (lowest unused) Tx Queue, mark it "active".
2112 * Called only when finding queue for aggregation.
2113 * Should never return anything < 7, because they should already
2114 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
2115 */
2116static int
2117il4965_txq_ctx_activate_free(struct il_priv *il)
2118{
2119 int txq_id;
2120
2121 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2122 if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
2123 return txq_id;
2124 return -1;
2125}
2126
2127/**
2128 * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
2129 */
2130static void
2131il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
2132{
2133 /* Simply stop the queue, but don't change any configuration;
2134 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
2135 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
2136 (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2137 (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2138}
2139
2140/**
2141 * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
2142 */
2143static int
2144il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
2145{
2146 u32 tbl_dw_addr;
2147 u32 tbl_dw;
2148 u16 scd_q2ratid;
2149
2150 scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
2151
2152 tbl_dw_addr =
2153 il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
2154
2155 tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
2156
2157 if (txq_id & 0x1)
2158 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
2159 else
2160 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
2161
2162 il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
2163
2164 return 0;
2165}
2166
2167/**
2168 * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
2169 *
2170 * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
2171 * i.e. it must be one of the higher queues used for aggregation
2172 */
2173static int
2174il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
2175 int tid, u16 ssn_idx)
2176{
2177 unsigned long flags;
2178 u16 ra_tid;
2179 int ret;
2180
2181 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2182 (IL49_FIRST_AMPDU_QUEUE +
2183 il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
2184 IL_WARN("queue number out of range: %d, must be %d to %d\n",
2185 txq_id, IL49_FIRST_AMPDU_QUEUE,
2186 IL49_FIRST_AMPDU_QUEUE +
2187 il->cfg->base_params->num_of_ampdu_queues - 1);
2188 return -EINVAL;
2189 }
2190
2191 ra_tid = BUILD_RAxTID(sta_id, tid);
2192
2193 /* Modify device's station table to Tx this TID */
2194 ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
2195 if (ret)
2196 return ret;
2197
2198 spin_lock_irqsave(&il->lock, flags);
2199
2200 /* Stop this Tx queue before configuring it */
2201 il4965_tx_queue_stop_scheduler(il, txq_id);
2202
2203 /* Map receiver-address / traffic-ID to this queue */
2204 il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
2205
2206 /* Set this queue as a chain-building queue */
2207 il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2208
2209 /* Place first TFD at idx corresponding to start sequence number.
2210 * Assumes that ssn_idx is valid (!= 0xFFF) */
2211 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2212 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2213 il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2214
2215 /* Set up Tx win size and frame limit for this queue */
2216 il_write_targ_mem(il,
2217 il->scd_base_addr +
2218 IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
2219 (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
2220 & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
2221
2222 il_write_targ_mem(il,
2223 il->scd_base_addr +
2224 IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
2225 (SCD_FRAME_LIMIT <<
2226 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2227 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
2228
2229 il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2230
2231 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
2232 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
2233
2234 spin_unlock_irqrestore(&il->lock, flags);
2235
2236 return 0;
2237}
2238
/**
 * il4965_tx_agg_start - start a Tx BA (aggregation) session for <sta, tid>
 *
 * Validates the station/tid, claims a free aggregation queue, records the
 * starting sequence number in *ssn and enables the queue in hardware.
 * The session goes straight to IL_AGG_ON when the HW queue is already
 * drained; otherwise it waits in IL_EMPTYING_HW_QUEUE_ADDBA until
 * il4965_txq_check_empty() sees the queue empty.
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct il_tid_data *tid_data;

	tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* Claim the lowest unused hardware queue for this session */
	txq_id = il4965_txq_ctx_activate_free(il);
	if (txq_id == -1) {
		IL_ERR("No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		D_HT("HW queue is empty\n");
		tid_data->agg.state = IL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
		     tid_data->tfds_in_queue);
		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2300
2301/**
2302 * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
2303 * il->lock must be held by the caller
2304 */
2305static int
2306il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
2307{
2308 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2309 (IL49_FIRST_AMPDU_QUEUE +
2310 il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
2311 IL_WARN("queue number out of range: %d, must be %d to %d\n",
2312 txq_id, IL49_FIRST_AMPDU_QUEUE,
2313 IL49_FIRST_AMPDU_QUEUE +
2314 il->cfg->base_params->num_of_ampdu_queues - 1);
2315 return -EINVAL;
2316 }
2317
2318 il4965_tx_queue_stop_scheduler(il, txq_id);
2319
2320 il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2321
2322 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2323 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2324 /* supposes that ssn_idx is valid (!= 0xFFF) */
2325 il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2326
2327 il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2328 il_txq_ctx_deactivate(il, txq_id);
2329 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
2330
2331 return 0;
2332}
2333
/**
 * il4965_tx_agg_stop - stop a Tx BA (aggregation) session for <sta, tid>
 *
 * If the HW queue still holds frames, only mark the session
 * IL_EMPTYING_HW_QUEUE_DELBA and let il4965_txq_check_empty() finish
 * the teardown once the queue drains; otherwise disable the queue here.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct il_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = il_sta_id(sta);

	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);

	tid_data = &il->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		D_HT("AGG stop before setup done\n");
		goto turn_off;
	case IL_AGG_ON:
		break;
	default:
		IL_WARN("Stopping AGG while state not ON or starting\n");
	}

	write_ptr = il->txq[txq_id].q.write_ptr;
	read_ptr = il->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		D_HT("Stopping a non empty AGG HW QUEUE\n");
		il->stations[sta_id].tid[tid].agg.state =
		    IL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	D_HT("HW queue is empty\n");
turn_off:
	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

	/* do not restore/save irqs: swap sta_lock for il->lock while irqs
	 * stay disabled; "flags" from the sta_lock save above is reused
	 * for the final irqrestore below */
	spin_unlock(&il->sta_lock);
	spin_lock(&il->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&il->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
2410
2411int
2412il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
2413{
2414 struct il_queue *q = &il->txq[txq_id].q;
2415 u8 *addr = il->stations[sta_id].sta.sta.addr;
2416 struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
2417 struct il_rxon_context *ctx;
2418
2419 ctx = &il->ctx;
2420
2421 lockdep_assert_held(&il->sta_lock);
2422
2423 switch (il->stations[sta_id].tid[tid].agg.state) {
2424 case IL_EMPTYING_HW_QUEUE_DELBA:
2425 /* We are reclaiming the last packet of the */
2426 /* aggregated HW queue */
2427 if (txq_id == tid_data->agg.txq_id &&
2428 q->read_ptr == q->write_ptr) {
2429 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
2430 int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
2431 D_HT("HW queue empty: continue DELBA flow\n");
2432 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
2433 tid_data->agg.state = IL_AGG_OFF;
2434 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
2435 }
2436 break;
2437 case IL_EMPTYING_HW_QUEUE_ADDBA:
2438 /* We are reclaiming the last packet of the queue */
2439 if (tid_data->tfds_in_queue == 0) {
2440 D_HT("HW queue empty: continue ADDBA flow\n");
2441 tid_data->agg.state = IL_AGG_ON;
2442 ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
2443 }
2444 break;
2445 }
2446
2447 return 0;
2448}
2449
2450static void
2451il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx,
2452 const u8 *addr1)
2453{
2454 struct ieee80211_sta *sta;
2455 struct il_station_priv *sta_priv;
2456
2457 rcu_read_lock();
2458 sta = ieee80211_find_sta(ctx->vif, addr1);
2459 if (sta) {
2460 sta_priv = (void *)sta->drv_priv;
2461 /* avoid atomic ops if this isn't a client */
2462 if (sta_priv->client &&
2463 atomic_dec_return(&sta_priv->pending_frames) == 0)
2464 ieee80211_sta_block_awake(il->hw, sta, false);
2465 }
2466 rcu_read_unlock();
2467}
2468
2469static void
2470il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg)
2471{
2472 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
2473
2474 if (!is_agg)
2475 il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
2476
2477 ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
2478}
2479
/**
 * il4965_tx_queue_reclaim - reclaim completed TFDs from a Tx queue
 *
 * Walks the queue from read_ptr up to (and including) @idx, handing each
 * skb's status back to mac80211 and freeing its TFD.  Returns the number
 * of reclaimed QoS-data frames (used by callers for tfds_in_queue
 * accounting).
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	struct il_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return 0;
	}

	/* Advance read_ptr one past @idx, wrapping around the ring */
	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];

		if (WARN_ON_ONCE(tx_info->skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		/* Queues >= IL4965_FIRST_AMPDU_QUEUE carry aggregated
		 * traffic */
		il4965_tx_status(il, tx_info,
				 txq_id >= IL4965_FIRST_AMPDU_QUEUE);
		tx_info->skb = NULL;

		il->cfg->ops->lib->txq_free_tfd(il, txq);
	}
	return nfreed;
}
2516
2517/**
2518 * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
2519 *
2520 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
2521 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
2522 */
2523static int
2524il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
2525 struct il_compressed_ba_resp *ba_resp)
2526{
2527 int i, sh, ack;
2528 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
2529 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2530 int successes = 0;
2531 struct ieee80211_tx_info *info;
2532 u64 bitmap, sent_bitmap;
2533
2534 if (unlikely(!agg->wait_for_ba)) {
2535 if (unlikely(ba_resp->bitmap))
2536 IL_ERR("Received BA when not expected\n");
2537 return -EINVAL;
2538 }
2539
2540 /* Mark that the expected block-ack response arrived */
2541 agg->wait_for_ba = 0;
2542 D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
2543
2544 /* Calculate shift to align block-ack bits with our Tx win bits */
2545 sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
2546 if (sh < 0) /* tbw something is wrong with indices */
2547 sh += 0x100;
2548
2549 if (agg->frame_count > (64 - sh)) {
2550 D_TX_REPLY("more frames than bitmap size");
2551 return -1;
2552 }
2553
2554 /* don't use 64-bit values for now */
2555 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
2556
2557 /* check for success or failure according to the
2558 * transmitted bitmap and block-ack bitmap */
2559 sent_bitmap = bitmap & agg->bitmap;
2560
2561 /* For each frame attempted in aggregation,
2562 * update driver's record of tx frame's status. */
2563 i = 0;
2564 while (sent_bitmap) {
2565 ack = sent_bitmap & 1ULL;
2566 successes += ack;
2567 D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
2568 i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
2569 sent_bitmap >>= 1;
2570 ++i;
2571 }
2572
2573 D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
2574
2575 info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
2576 memset(&info->status, 0, sizeof(info->status));
2577 info->flags |= IEEE80211_TX_STAT_ACK;
2578 info->flags |= IEEE80211_TX_STAT_AMPDU;
2579 info->status.ampdu_ack_len = successes;
2580 info->status.ampdu_len = agg->frame_count;
2581 il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
2582
2583 return 0;
2584}
2585
2586/**
2587 * translate ucode response to mac80211 tx status control values
2588 */
2589void
2590il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2591 struct ieee80211_tx_info *info)
2592{
2593 struct ieee80211_tx_rate *r = &info->control.rates[0];
2594
2595 info->antenna_sel_tx =
2596 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2597 if (rate_n_flags & RATE_MCS_HT_MSK)
2598 r->flags |= IEEE80211_TX_RC_MCS;
2599 if (rate_n_flags & RATE_MCS_GF_MSK)
2600 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
2601 if (rate_n_flags & RATE_MCS_HT40_MSK)
2602 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2603 if (rate_n_flags & RATE_MCS_DUP_MSK)
2604 r->flags |= IEEE80211_TX_RC_DUP_DATA;
2605 if (rate_n_flags & RATE_MCS_SGI_MSK)
2606 r->flags |= IEEE80211_TX_RC_SHORT_GI;
2607 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
2608}
2609
2610/**
2611 * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
2612 *
2613 * Handles block-acknowledge notification from device, which reports success
2614 * of frames sent via aggregation.
2615 */
2616void
2617il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
2618{
2619 struct il_rx_pkt *pkt = rxb_addr(rxb);
2620 struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
2621 struct il_tx_queue *txq = NULL;
2622 struct il_ht_agg *agg;
2623 int idx;
2624 int sta_id;
2625 int tid;
2626 unsigned long flags;
2627
2628 /* "flow" corresponds to Tx queue */
2629 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2630
2631 /* "ssn" is start of block-ack Tx win, corresponds to idx
2632 * (in Tx queue's circular buffer) of first TFD/frame in win */
2633 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
2634
2635 if (scd_flow >= il->hw_params.max_txq_num) {
2636 IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
2637 return;
2638 }
2639
2640 txq = &il->txq[scd_flow];
2641 sta_id = ba_resp->sta_id;
2642 tid = ba_resp->tid;
2643 agg = &il->stations[sta_id].tid[tid].agg;
2644 if (unlikely(agg->txq_id != scd_flow)) {
2645 /*
2646 * FIXME: this is a uCode bug which need to be addressed,
2647 * log the information and return for now!
2648 * since it is possible happen very often and in order
2649 * not to fill the syslog, don't enable the logging by default
2650 */
2651 D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
2652 scd_flow, agg->txq_id);
2653 return;
2654 }
2655
2656 /* Find idx just before block-ack win */
2657 idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
2658
2659 spin_lock_irqsave(&il->sta_lock, flags);
2660
2661 D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
2662 agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
2663 ba_resp->sta_id);
2664 D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
2665 "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
2666 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
2667 ba_resp->scd_flow, ba_resp->scd_ssn);
2668 D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
2669 (unsigned long long)agg->bitmap);
2670
2671 /* Update driver's record of ACK vs. not for each frame in win */
2672 il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
2673
2674 /* Release all TFDs before the SSN, i.e. all TFDs in front of
2675 * block-ack win (we assume that they've been successfully
2676 * transmitted ... if not, it's too late anyway). */
2677 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
2678 /* calculate mac80211 ampdu sw queue to wake */
2679 int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
2680 il4965_free_tfds_in_queue(il, sta_id, tid, freed);
2681
2682 if (il_queue_space(&txq->q) > txq->q.low_mark &&
2683 il->mac80211_registered &&
2684 agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
2685 il_wake_queue(il, txq);
2686
2687 il4965_txq_check_empty(il, sta_id, tid, scd_flow);
2688 }
2689
2690 spin_unlock_irqrestore(&il->sta_lock, flags);
2691}
2692
2693#ifdef CONFIG_IWLEGACY_DEBUG
/**
 * il4965_get_tx_fail_reason - map a raw Tx status word to a printable name
 * @status: Tx status word from the uCode Tx response
 *
 * Only the bits selected by TX_STATUS_MSK are examined. Returns a static
 * string naming the postpone/failure reason, or "UNKNOWN" for codes not in
 * the table. Compiled only with CONFIG_IWLEGACY_DEBUG.
 */
const char *
il4965_get_tx_fail_reason(u32 status)
{
/* Each macro expands to a case label returning the stringified name */
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_POSTPONE(DELAY);
		TX_STATUS_POSTPONE(FEW_BYTES);
		TX_STATUS_POSTPONE(QUIET_PERIOD);
		TX_STATUS_POSTPONE(CALC_TTAK);
		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
		TX_STATUS_FAIL(SHORT_LIMIT);
		TX_STATUS_FAIL(LONG_LIMIT);
		TX_STATUS_FAIL(FIFO_UNDERRUN);
		TX_STATUS_FAIL(DRAIN_FLOW);
		TX_STATUS_FAIL(RFKILL_FLUSH);
		TX_STATUS_FAIL(LIFE_EXPIRE);
		TX_STATUS_FAIL(DEST_PS);
		TX_STATUS_FAIL(HOST_ABORTED);
		TX_STATUS_FAIL(BT_RETRY);
		TX_STATUS_FAIL(STA_INVALID);
		TX_STATUS_FAIL(FRAG_DROPPED);
		TX_STATUS_FAIL(TID_DISABLE);
		TX_STATUS_FAIL(FIFO_FLUSHED);
		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
		TX_STATUS_FAIL(PASSIVE_NO_RX);
		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
2731#endif /* CONFIG_IWLEGACY_DEBUG */
2732
2733static struct il_link_quality_cmd *
2734il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
2735{
2736 int i, r;
2737 struct il_link_quality_cmd *link_cmd;
2738 u32 rate_flags = 0;
2739 __le32 rate_n_flags;
2740
2741 link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
2742 if (!link_cmd) {
2743 IL_ERR("Unable to allocate memory for LQ cmd.\n");
2744 return NULL;
2745 }
2746 /* Set up the rate scaling to start at selected rate, fall back
2747 * all the way down to 1M in IEEE order, and then spin on 1M */
2748 if (il->band == IEEE80211_BAND_5GHZ)
2749 r = RATE_6M_IDX;
2750 else
2751 r = RATE_1M_IDX;
2752
2753 if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
2754 rate_flags |= RATE_MCS_CCK_MSK;
2755
2756 rate_flags |=
2757 il4965_first_antenna(il->hw_params.
2758 valid_tx_ant) << RATE_MCS_ANT_POS;
2759 rate_n_flags = il4965_hw_set_rate_n_flags(il_rates[r].plcp, rate_flags);
2760 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
2761 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
2762
2763 link_cmd->general_params.single_stream_ant_msk =
2764 il4965_first_antenna(il->hw_params.valid_tx_ant);
2765
2766 link_cmd->general_params.dual_stream_ant_msk =
2767 il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
2768 valid_tx_ant);
2769 if (!link_cmd->general_params.dual_stream_ant_msk) {
2770 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
2771 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
2772 link_cmd->general_params.dual_stream_ant_msk =
2773 il->hw_params.valid_tx_ant;
2774 }
2775
2776 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2777 link_cmd->agg_params.agg_time_limit =
2778 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2779
2780 link_cmd->sta_id = sta_id;
2781
2782 return link_cmd;
2783}
2784
2785/*
2786 * il4965_add_bssid_station - Add the special IBSS BSSID station
2787 *
2788 * Function sleeps.
2789 */
2790int
2791il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
2792 const u8 *addr, u8 *sta_id_r)
2793{
2794 int ret;
2795 u8 sta_id;
2796 struct il_link_quality_cmd *link_cmd;
2797 unsigned long flags;
2798
2799 if (sta_id_r)
2800 *sta_id_r = IL_INVALID_STATION;
2801
2802 ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
2803 if (ret) {
2804 IL_ERR("Unable to add station %pM\n", addr);
2805 return ret;
2806 }
2807
2808 if (sta_id_r)
2809 *sta_id_r = sta_id;
2810
2811 spin_lock_irqsave(&il->sta_lock, flags);
2812 il->stations[sta_id].used |= IL_STA_LOCAL;
2813 spin_unlock_irqrestore(&il->sta_lock, flags);
2814
2815 /* Set up default rate scaling table in device's station table */
2816 link_cmd = il4965_sta_alloc_lq(il, sta_id);
2817 if (!link_cmd) {
2818 IL_ERR("Unable to initialize rate scaling for station %pM.\n",
2819 addr);
2820 return -ENOMEM;
2821 }
2822
2823 ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
2824 if (ret)
2825 IL_ERR("Link quality command failed (%d)\n", ret);
2826
2827 spin_lock_irqsave(&il->sta_lock, flags);
2828 il->stations[sta_id].lq = link_cmd;
2829 spin_unlock_irqrestore(&il->sta_lock, flags);
2830
2831 return 0;
2832}
2833
/*
 * il4965_static_wepkey_cmd - sync all default (static) WEP keys to uCode
 * @send_if_empty: send the command even when no key is configured
 *                 (used to clear keys in the device on removal)
 *
 * Builds a WEP-key command covering all WEP_KEYS_MAX slots on the stack
 * and sends it synchronously. Returns 0 without sending when every slot
 * is empty and @send_if_empty is false; otherwise the il_send_cmd() result.
 * Sleeps.
 */
static int
il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
			 bool send_if_empty)
{
	int i, not_empty = 0;
	/* command header followed by one il_wep_key entry per slot */
	u8 buff[sizeof(struct il_wep_cmd) +
		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct il_wep_cmd);
	struct il_host_cmd cmd = {
		.id = ctx->wep_key_cmd,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};

	might_sleep();

	/* clear the whole stack buffer (header + all key entries) */
	memset(wep_cmd, 0,
	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX; i++) {
		wep_cmd->key[i].key_idx = i;
		if (ctx->wep_keys[i].key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = 1;
		} else {
			/* unused slot */
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
		}

		wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
		/* key material starts at byte 3 of the entry's key array —
		 * presumably an IV-sized gap expected by the uCode; confirm
		 * against the firmware interface definition */
		memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
		       ctx->wep_keys[i].key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;

	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return il_send_cmd(il, &cmd);
	else
		return 0;
}
2880
2881int
2882il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx)
2883{
2884 lockdep_assert_held(&il->mutex);
2885
2886 return il4965_static_wepkey_cmd(il, ctx, false);
2887}
2888
2889int
2890il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
2891 struct ieee80211_key_conf *keyconf)
2892{
2893 int ret;
2894
2895 lockdep_assert_held(&il->mutex);
2896
2897 D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx);
2898
2899 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
2900 if (il_is_rfkill(il)) {
2901 D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
2902 /* but keys in device are clear anyway so return success */
2903 return 0;
2904 }
2905 ret = il4965_static_wepkey_cmd(il, ctx, 1);
2906 D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret);
2907
2908 return ret;
2909}
2910
2911int
2912il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
2913 struct ieee80211_key_conf *keyconf)
2914{
2915 int ret;
2916
2917 lockdep_assert_held(&il->mutex);
2918
2919 if (keyconf->keylen != WEP_KEY_LEN_128 &&
2920 keyconf->keylen != WEP_KEY_LEN_64) {
2921 D_WEP("Bad WEP key length %d\n", keyconf->keylen);
2922 return -EINVAL;
2923 }
2924
2925 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
2926 keyconf->hw_key_idx = HW_KEY_DEFAULT;
2927 il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
2928
2929 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
2930 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
2931 keyconf->keylen);
2932
2933 ret = il4965_static_wepkey_cmd(il, ctx, false);
2934 D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen,
2935 keyconf->keyidx, ret);
2936
2937 return ret;
2938}
2939
/*
 * il4965_set_wep_dynamic_key_info - install a pairwise WEP key in uCode
 *
 * Records the key in the driver's station table under sta_lock, then sends
 * a synchronous C_ADD_STA with STA_MODIFY_KEY_MASK to program the device.
 * Caller must hold il->mutex.
 */
static int
il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* IVs are not generated by mac80211 for this key */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* key material starts at byte 3 of the device key field —
	 * presumably an IV-sized gap; confirm against uCode interface */
	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
	       keyconf->keylen);

	/* allocate a uCode key slot only when not replacing a live key */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* snapshot under the lock; the sync send below may sleep */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
2993
/*
 * il4965_set_ccmp_dynamic_key_info - install a CCMP (AES) pairwise key
 *
 * Records the key in the driver's station table under sta_lock and sends
 * a synchronous C_ADD_STA with STA_MODIFY_KEY_MASK to program the device.
 * mac80211 is asked to generate IVs for this key. Caller holds il->mutex.
 */
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct il_rxon_context *ctx,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	/* allocate a uCode key slot only when not replacing a live key */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* snapshot under the lock; the sync send below may sleep */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3042
/*
 * il4965_set_tkip_dynamic_key_info - record a TKIP pairwise key
 *
 * Unlike WEP/CCMP, no C_ADD_STA is sent here: the TKIP phase-1 key is
 * programmed later via il4965_update_tkip_key() as IV32 changes, and the
 * key material accompanies each Tx. Always returns 0.
 * mac80211 is asked to generate IVs and Michael MICs for this key.
 */
static int
il4965_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct il_rxon_context *ctx,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	int ret = 0;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	/* TKIP temporal key is always 16 bytes */
	il->stations[sta_id].keyinfo.keylen = 16;

	/* allocate a uCode key slot only when not replacing a live key */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;

	/* This copy is acutally not needed: we get the key with each TX */
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
3088
/*
 * il4965_update_tkip_key - push a fresh TKIP phase-1 key to the device
 * @iv32: upper 32 bits of the TKIP IV the phase-1 key was derived from
 * @phase1key: five 16-bit words of the new phase-1 (TTAK) key
 *
 * Called by mac80211 when IV32 rolls over. Cancels any scan first; if the
 * cancel fails, the update is skipped and SW decryption covers the gap.
 * The C_ADD_STA is sent asynchronously while holding sta_lock.
 */
void
il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
		       struct ieee80211_key_conf *keyconf,
		       struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (il_scan_cancel(il)) {
		/* cancel scan failed, just live w/ bad key and rely
		   briefly on SW decryption */
		return;
	}

	/* NULL sta means the group (broadcast) station */
	sta_id = il_sta_id_or_broadcast(il, ctx, sta);
	if (sta_id == IL_INVALID_STATION)
		return;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* the TTAK is 5 little-endian 16-bit words */
	for (i = 0; i < 5; i++)
		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
		    cpu_to_le16(phase1key[i]);

	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);

}
3124
/*
 * il4965_remove_dynamic_key - remove a pairwise key from driver and device
 *
 * Clears the key from the driver's station table, releases its uCode key
 * slot, and sends a synchronous C_ADD_STA marking the key invalid.
 * Returns 0 (treated as success) when the key was already superseded or
 * absent, or when RFKILL makes the device update unnecessary.
 * Caller must hold il->mutex.
 */
int
il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	ctx->key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with idx different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different idx.
		 * Don't do anything and return ok
		 */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* release the uCode key slot back to the allocator bitmap */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}
	/* snapshot under the lock; the sync send below may sleep */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3185
3186int
3187il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
3188 struct ieee80211_key_conf *keyconf, u8 sta_id)
3189{
3190 int ret;
3191
3192 lockdep_assert_held(&il->mutex);
3193
3194 ctx->key_mapping_keys++;
3195 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
3196
3197 switch (keyconf->cipher) {
3198 case WLAN_CIPHER_SUITE_CCMP:
3199 ret =
3200 il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id);
3201 break;
3202 case WLAN_CIPHER_SUITE_TKIP:
3203 ret =
3204 il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id);
3205 break;
3206 case WLAN_CIPHER_SUITE_WEP40:
3207 case WLAN_CIPHER_SUITE_WEP104:
3208 ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id);
3209 break;
3210 default:
3211 IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
3212 keyconf->cipher);
3213 ret = -EINVAL;
3214 }
3215
3216 D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
3217 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
3218
3219 return ret;
3220}
3221
3222/**
3223 * il4965_alloc_bcast_station - add broadcast station into driver's station table.
3224 *
3225 * This adds the broadcast station into the driver's station table
3226 * and marks it driver active, so that it will be restored to the
3227 * device at the next best time.
3228 */
3229int
3230il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
3231{
3232 struct il_link_quality_cmd *link_cmd;
3233 unsigned long flags;
3234 u8 sta_id;
3235
3236 spin_lock_irqsave(&il->sta_lock, flags);
3237 sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
3238 if (sta_id == IL_INVALID_STATION) {
3239 IL_ERR("Unable to prepare broadcast station\n");
3240 spin_unlock_irqrestore(&il->sta_lock, flags);
3241
3242 return -EINVAL;
3243 }
3244
3245 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
3246 il->stations[sta_id].used |= IL_STA_BCAST;
3247 spin_unlock_irqrestore(&il->sta_lock, flags);
3248
3249 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3250 if (!link_cmd) {
3251 IL_ERR
3252 ("Unable to initialize rate scaling for bcast station.\n");
3253 return -ENOMEM;
3254 }
3255
3256 spin_lock_irqsave(&il->sta_lock, flags);
3257 il->stations[sta_id].lq = link_cmd;
3258 spin_unlock_irqrestore(&il->sta_lock, flags);
3259
3260 return 0;
3261}
3262
3263/**
3264 * il4965_update_bcast_station - update broadcast station's LQ command
3265 *
3266 * Only used by iwl4965. Placed here to have all bcast station management
3267 * code together.
3268 */
3269static int
3270il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
3271{
3272 unsigned long flags;
3273 struct il_link_quality_cmd *link_cmd;
3274 u8 sta_id = ctx->bcast_sta_id;
3275
3276 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3277 if (!link_cmd) {
3278 IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
3279 return -ENOMEM;
3280 }
3281
3282 spin_lock_irqsave(&il->sta_lock, flags);
3283 if (il->stations[sta_id].lq)
3284 kfree(il->stations[sta_id].lq);
3285 else
3286 D_INFO("Bcast sta rate scaling has not been initialized.\n");
3287 il->stations[sta_id].lq = link_cmd;
3288 spin_unlock_irqrestore(&il->sta_lock, flags);
3289
3290 return 0;
3291}
3292
3293int
3294il4965_update_bcast_stations(struct il_priv *il)
3295{
3296 return il4965_update_bcast_station(il, &il->ctx);
3297}
3298
3299/**
3300 * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
3301 */
3302int
3303il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
3304{
3305 unsigned long flags;
3306 struct il_addsta_cmd sta_cmd;
3307
3308 lockdep_assert_held(&il->mutex);
3309
3310 /* Remove "disable" flag, to enable Tx for this TID */
3311 spin_lock_irqsave(&il->sta_lock, flags);
3312 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
3313 il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
3314 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3315 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3316 sizeof(struct il_addsta_cmd));
3317 spin_unlock_irqrestore(&il->sta_lock, flags);
3318
3319 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3320}
3321
3322int
3323il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
3324 u16 ssn)
3325{
3326 unsigned long flags;
3327 int sta_id;
3328 struct il_addsta_cmd sta_cmd;
3329
3330 lockdep_assert_held(&il->mutex);
3331
3332 sta_id = il_sta_id(sta);
3333 if (sta_id == IL_INVALID_STATION)
3334 return -ENXIO;
3335
3336 spin_lock_irqsave(&il->sta_lock, flags);
3337 il->stations[sta_id].sta.station_flags_msk = 0;
3338 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
3339 il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
3340 il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
3341 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3342 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3343 sizeof(struct il_addsta_cmd));
3344 spin_unlock_irqrestore(&il->sta_lock, flags);
3345
3346 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3347}
3348
3349int
3350il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
3351{
3352 unsigned long flags;
3353 int sta_id;
3354 struct il_addsta_cmd sta_cmd;
3355
3356 lockdep_assert_held(&il->mutex);
3357
3358 sta_id = il_sta_id(sta);
3359 if (sta_id == IL_INVALID_STATION) {
3360 IL_ERR("Invalid station for AGG tid %d\n", tid);
3361 return -ENXIO;
3362 }
3363
3364 spin_lock_irqsave(&il->sta_lock, flags);
3365 il->stations[sta_id].sta.station_flags_msk = 0;
3366 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
3367 il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
3368 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3369 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3370 sizeof(struct il_addsta_cmd));
3371 spin_unlock_irqrestore(&il->sta_lock, flags);
3372
3373 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3374}
3375
3376void
3377il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
3378{
3379 unsigned long flags;
3380
3381 spin_lock_irqsave(&il->sta_lock, flags);
3382 il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
3383 il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
3384 il->stations[sta_id].sta.sta.modify_mask =
3385 STA_MODIFY_SLEEP_TX_COUNT_MSK;
3386 il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
3387 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3388 il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
3389 spin_unlock_irqrestore(&il->sta_lock, flags);
3390
3391}
3392
3393void
3394il4965_update_chain_flags(struct il_priv *il)
3395{
3396 if (il->cfg->ops->hcmd->set_rxon_chain) {
3397 il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
3398 if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
3399 il_commit_rxon(il, &il->ctx);
3400 }
3401}
3402
3403static void
3404il4965_clear_free_frames(struct il_priv *il)
3405{
3406 struct list_head *element;
3407
3408 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
3409
3410 while (!list_empty(&il->free_frames)) {
3411 element = il->free_frames.next;
3412 list_del(element);
3413 kfree(list_entry(element, struct il_frame, list));
3414 il->frames_count--;
3415 }
3416
3417 if (il->frames_count) {
3418 IL_WARN("%d frames still in use. Did we lose one?\n",
3419 il->frames_count);
3420 il->frames_count = 0;
3421 }
3422}
3423
3424static struct il_frame *
3425il4965_get_free_frame(struct il_priv *il)
3426{
3427 struct il_frame *frame;
3428 struct list_head *element;
3429 if (list_empty(&il->free_frames)) {
3430 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
3431 if (!frame) {
3432 IL_ERR("Could not allocate frame!\n");
3433 return NULL;
3434 }
3435
3436 il->frames_count++;
3437 return frame;
3438 }
3439
3440 element = il->free_frames.next;
3441 list_del(element);
3442 return list_entry(element, struct il_frame, list);
3443}
3444
3445static void
3446il4965_free_frame(struct il_priv *il, struct il_frame *frame)
3447{
3448 memset(frame, 0, sizeof(*frame));
3449 list_add(&frame->list, &il->free_frames);
3450}
3451
3452static u32
3453il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
3454 int left)
3455{
3456 lockdep_assert_held(&il->mutex);
3457
3458 if (!il->beacon_skb)
3459 return 0;
3460
3461 if (il->beacon_skb->len > left)
3462 return 0;
3463
3464 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
3465
3466 return il->beacon_skb->len;
3467}
3468
/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void
il4965_set_beacon_tim(struct il_priv *il,
		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
		      u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The idx is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM;
	 * each IE is id(1) + len(1) + payload, hence the frame_size - 2
	 * bound to keep the id and length bytes readable. */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
	} else
		IL_WARN("Unable to find TIM Element in beacon\n");
}
3496
/* Build the complete C_TX_BEACON command inside @frame: TX command fields,
 * beacon contents (copied from il->beacon_skb), TIM pointers and the rate.
 * Returns the total command size in bytes, or 0 on failure. */
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_ctx) {
		IL_ERR("trying to build beacon w/o beacon context!\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents; the available space is whatever is
	 * left in the frame union after the command header itself. */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields (TIM idx/size) */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Set up packet rate and flags; rotate the mgmt TX antenna so
	 * successive beacons alternate among the valid antennas. */
	rate = il_get_lowest_plcp(il, il->beacon_ctx);
	il->mgmt_tx_ant =
	    il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
				 il->hw_params.valid_tx_ant);
	rate_flags = il4965_ant_idx_to_flags(il->mgmt_tx_ant);
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags =
	    il4965_hw_set_rate_n_flags(rate, rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
3554
3555int
3556il4965_send_beacon_cmd(struct il_priv *il)
3557{
3558 struct il_frame *frame;
3559 unsigned int frame_size;
3560 int rc;
3561
3562 frame = il4965_get_free_frame(il);
3563 if (!frame) {
3564 IL_ERR("Could not obtain free frame buffer for beacon "
3565 "command.\n");
3566 return -ENOMEM;
3567 }
3568
3569 frame_size = il4965_hw_get_beacon_cmd(il, frame);
3570 if (!frame_size) {
3571 IL_ERR("Error configuring the beacon command\n");
3572 il4965_free_frame(il, frame);
3573 return -EINVAL;
3574 }
3575
3576 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
3577
3578 il4965_free_frame(il, frame);
3579
3580 return rc;
3581}
3582
3583static inline dma_addr_t
3584il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
3585{
3586 struct il_tfd_tb *tb = &tfd->tbs[idx];
3587
3588 dma_addr_t addr = get_unaligned_le32(&tb->lo);
3589 if (sizeof(dma_addr_t) > sizeof(u32))
3590 addr |=
3591 ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
3592 16;
3593
3594 return addr;
3595}
3596
3597static inline u16
3598il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
3599{
3600 struct il_tfd_tb *tb = &tfd->tbs[idx];
3601
3602 return le16_to_cpu(tb->hi_n_len) >> 4;
3603}
3604
3605static inline void
3606il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
3607{
3608 struct il_tfd_tb *tb = &tfd->tbs[idx];
3609 u16 hi_n_len = len << 4;
3610
3611 put_unaligned_le32(addr, &tb->lo);
3612 if (sizeof(dma_addr_t) > sizeof(u32))
3613 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
3614
3615 tb->hi_n_len = cpu_to_le16(hi_n_len);
3616
3617 tfd->num_tbs = idx + 1;
3618}
3619
static inline u8
il4965_tfd_get_num_tbs(struct il_tfd *tfd)
{
	/* Only the low 5 bits of num_tbs hold the TB count; mask off the
	 * remaining control/pad bits. */
	return tfd->num_tbs & 0x1f;
}
3625
3626/**
3627 * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
3628 * @il - driver ilate data
3629 * @txq - tx queue
3630 *
3631 * Does NOT advance any TFD circular buffer read/write idxes
3632 * Does NOT free the TFD itself (which is within circular buffer)
3633 */
void
il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
	struct il_tfd *tfd;
	struct pci_dev *dev = il->pci_dev;
	int idx = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[idx];

	/* Sanity check on number of chunks */
	num_tbs = il4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd: TB 0 always holds the command itself, mapped
	 * bidirectionally because the device writes status back into it. */
	if (num_tbs)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any: TBs 1..num_tbs-1 are payload fragments. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
				 il4965_tfd_tb_get_len(tfd, i),
				 PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}
3680
/* Append one DMA buffer (@addr/@len) to the TFD at the queue's write
 * pointer.  @reset clears the TFD first (used for the first TB).  @pad is
 * accepted for API symmetry but unused here.  Returns 0 or -EINVAL when
 * the TFD is already full. */
int
il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	struct il_queue *q;
	struct il_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct il_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = il4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Error can not send more than %d chunks\n",
		       IL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* The hardware addresses at most 36 bits of DMA space; unaligned
	 * addresses are only warned about, not rejected. */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IL_TX_DMA_MASK))
		IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);

	il4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
3713
3714/*
3715 * Tell nic where to find circular buffer of Tx Frame Descriptors for
3716 * given Tx queue, and enable the DMA channel used for that queue.
3717 *
3718 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
3719 * channels supported in hardware.
3720 */
3721int
3722il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
3723{
3724 int txq_id = txq->q.id;
3725
3726 /* Circular buffer (TFD queue in DRAM) physical base address */
3727 il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
3728
3729 return 0;
3730}
3731
3732/******************************************************************************
3733 *
3734 * Generic RX handler implementations
3735 *
3736 ******************************************************************************/
/* Handle the N_ALIVE notification: the uCode has booted.  Copy the alive
 * response (init or runtime flavour) into the driver state and, if the
 * uCode reports a valid status, schedule the matching *_alive_start work. */
static void
il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
	       palive->is_valid, palive->ver_type, palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		D_INFO("Initialization Alive received.\n");
		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
		       sizeof(struct il_init_alive_resp));
		pwork = &il->init_alive_start;
	} else {
		D_INFO("Runtime Alive received.\n");
		memcpy(&il->card_alive, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
	else
		IL_WARN("uCode did not respond OK.\n");
}
3768
3769/**
3770 * il4965_bg_stats_periodic - Timer callback to queue stats
3771 *
3772 * This callback is provided in order to send a stats request.
3773 *
3774 * This timer function is continually reset to execute within
3775 * REG_RECALIB_PERIOD seconds since the last N_STATS
3776 * was received. We need to ensure we receive the stats in order
3777 * to update the temperature used for calibrating the TXPOWER.
3778 */
3779static void
3780il4965_bg_stats_periodic(unsigned long data)
3781{
3782 struct il_priv *il = (struct il_priv *)data;
3783
3784 if (test_bit(S_EXIT_PENDING, &il->status))
3785 return;
3786
3787 /* dont send host command if rf-kill is on */
3788 if (!il_is_ready_rf(il))
3789 return;
3790
3791 il_send_stats_request(il, CMD_ASYNC, false);
3792}
3793
/* Handle N_BEACON notification: record whether we are currently the IBSS
 * beacon manager (used elsewhere to decide who transmits beacons). */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
	    (struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif

	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
3812
/* React to a critical-temperature (CT) kill: stop all TX queues and put
 * the device into a low-power state until it cools down. */
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* Tell the uCode we are entering CT-kill; the read-back flushes
	 * the write over the PCI bus. */
	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	_il_rd(il, CSR_UCODE_DRV_GP1);

	/* Grab-and-release NIC access lets the device drop into its
	 * low-power state once no one holds the MAC awake. */
	spin_lock_irqsave(&il->reg_lock, flags);
	if (!_il_grab_nic_access(il))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
3832
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of status taken before we update S_RF_KILL_HW, so we
	 * can detect a state transition below. */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {

		/* Block host commands while any kill source is active ... */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* ... but unblock again unless the RXON path is disabled. */
		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RF_KILL_HW, &il->status);
	else
		clear_bit(S_RF_KILL_HW, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* On an HW rfkill transition, report the new state to the wiphy;
	 * otherwise wake anyone waiting for command completion. */
	if ((test_bit(S_RF_KILL_HW, &status) !=
	     test_bit(S_RF_KILL_HW, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RF_KILL_HW, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
3880
3881/**
3882 * il4965_setup_handlers - Initialize Rx handler callbacks
3883 *
3884 * Setup the RX handlers for each of the reply types sent from the uCode
3885 * to the host.
3886 *
3887 * This function chains into the hardware specific files for them to setup
3888 * any hardware specific handlers as well.
3889 */
static void
il4965_setup_handlers(struct il_priv *il)
{
	/* Generic notifications shared with the common iwlegacy core. */
	il->handlers[N_ALIVE] = il4965_hdl_alive;
	il->handlers[N_ERROR] = il_hdl_error;
	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
	il->handlers[N_BEACON] = il4965_hdl_beacon;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * stats request from the host as well as for the periodic
	 * stats notifications (after received beacons) from the uCode.
	 */
	il->handlers[C_STATS] = il4965_hdl_c_stats;
	il->handlers[N_STATS] = il4965_hdl_stats;

	il_setup_rx_scan_handlers(il);

	/* status change handler */
	il->handlers[N_CARD_STATE] = il4965_hdl_card_state;

	il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
	/* Rx handlers */
	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
	/* block ack */
	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
	/* Set up hardware specific Rx handlers */
	il->cfg->ops->lib->handler_setup(il);
}
3923
3924/**
3925 * il4965_rx_handle - Main entry function for receiving responses from uCode
3926 *
3927 * Uses the il->handlers callback function array to invoke
3928 * the appropriate handlers, including command responses,
3929 * frame-received notifications, and other notifications.
3930 */
void
il4965_rx_handle(struct il_priv *il)
{
	struct il_rx_buf *rxb;
	struct il_rx_pkt *pkt;
	struct il_rx_queue *rxq = &il->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		D_RX("r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* If more than half the ring is empty, restock eagerly every 8
	 * packets while draining instead of only once at the end. */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(il->pci_dev, rxb->page_dma,
			       PAGE_SIZE << il->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
		len += sizeof(u32);	/* account for status word */

		/* Reclaim a command buffer only if this packet is a response
		 * to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 * there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 * but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
		    (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
		    (pkt->hdr.cmd != N_RX_MPDU) &&
		    (pkt->hdr.cmd != N_COMPRESSED_BA) &&
		    (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);

		/* Based on type of command response or notification,
		 * handle those that need handling via function in
		 * handlers table. See il4965_setup_handlers() */
		if (il->handlers[pkt->hdr.cmd]) {
			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			il->isr_stats.handlers[pkt->hdr.cmd]++;
			il->handlers[pkt->hdr.cmd] (il, rxb);
		} else {
			/* No handling needed */
			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking il_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				il_tx_cmd_complete(il, rxb);
			else
				IL_WARN("Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma =
			    pci_map_page(il->pci_dev, rxb->page, 0,
					 PAGE_SIZE << il->hw_params.
					 rx_page_order, PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode wont assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				il4965_rx_replenish_now(il);
				count = 0;
			}
		}
	}

	/* Publish the final read pointer and restock whatever buffers
	 * we drained above. */
	rxq->read = i;
	if (fill_rx)
		il4965_rx_replenish_now(il);
	else
		il4965_rx_queue_restock(il);
}
4060
/* call this function to flush any scheduled tasklet */
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* wait to make sure we flush pending tasklet: first let any
	 * in-flight hard IRQ finish (it may schedule the tasklet), then
	 * kill the tasklet itself. */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
4069
/* Bottom half of the interrupt handler: ack the pending CSR/FH interrupt
 * bits and service each source (HW error, rfkill, CT kill, SW error,
 * wakeup, Rx, uCode-load Tx) in turn, then re-enable interrupts. */
static void
il4965_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Deliberate early return: interrupts stay disabled; the
		 * error path takes care of restarting the device. */
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!
		    (_il_rd(il, CSR_GP_CNTRL) &
		     CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(S_ALIVE, &il->status)) {
			if (hw_rf_kill)
				set_bit(S_RF_KILL_HW, &il->status);
			else
				clear_bit(S_RF_KILL_HW, &il->status);
			wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN("   with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
4255
4256/*****************************************************************************
4257 *
4258 * sysfs attributes
4259 *
4260 *****************************************************************************/
4261
4262#ifdef CONFIG_IWLEGACY_DEBUG
4263
4264/*
4265 * The following adds a new attribute to the sysfs representation
4266 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
4267 * used for controlling the debug level.
4268 *
4269 * See the level definitions in iwl for details.
4270 *
4271 * The debug_level being managed using sysfs below is a per device debug
4272 * level that is used instead of the global debug level if it (the per
4273 * device debug level) is set.
4274 */
/* sysfs show: print the effective (per-device or global) debug bitmask. */
static ssize_t
il4965_show_debug_level(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct il_priv *il = dev_get_drvdata(d);
	return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
}
4282
4283static ssize_t
4284il4965_store_debug_level(struct device *d, struct device_attribute *attr,
4285 const char *buf, size_t count)
4286{
4287 struct il_priv *il = dev_get_drvdata(d);
4288 unsigned long val;
4289 int ret;
4290
4291 ret = strict_strtoul(buf, 0, &val);
4292 if (ret)
4293 IL_ERR("%s is not in hex or decimal form.\n", buf);
4294 else {
4295 il->debug_level = val;
4296 if (il_alloc_traffic_mem(il))
4297 IL_ERR("Not enough memory to generate traffic log\n");
4298 }
4299 return strnlen(buf, count);
4300}
4301
/* debug_level: root-writable, world-readable per-device debug bitmask. */
static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
		   il4965_store_debug_level);
4304
4305#endif /* CONFIG_IWLEGACY_DEBUG */
4306
4307static ssize_t
4308il4965_show_temperature(struct device *d, struct device_attribute *attr,
4309 char *buf)
4310{
4311 struct il_priv *il = dev_get_drvdata(d);
4312
4313 if (!il_is_alive(il))
4314 return -EAGAIN;
4315
4316 return sprintf(buf, "%d\n", il->temperature);
4317}
4318
4319static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
4320
4321static ssize_t
4322il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
4323{
4324 struct il_priv *il = dev_get_drvdata(d);
4325
4326 if (!il_is_ready_rf(il))
4327 return sprintf(buf, "off\n");
4328 else
4329 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
4330}
4331
4332static ssize_t
4333il4965_store_tx_power(struct device *d, struct device_attribute *attr,
4334 const char *buf, size_t count)
4335{
4336 struct il_priv *il = dev_get_drvdata(d);
4337 unsigned long val;
4338 int ret;
4339
4340 ret = strict_strtoul(buf, 10, &val);
4341 if (ret)
4342 IL_INFO("%s is not in decimal form.\n", buf);
4343 else {
4344 ret = il_set_tx_power(il, val, false);
4345 if (ret)
4346 IL_ERR("failed setting tx power (0x%d).\n", ret);
4347 else
4348 ret = count;
4349 }
4350 return ret;
4351}
4352
/* tx_power: root-writable, world-readable user TX power limit. */
static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
		   il4965_store_tx_power);
4355
/* All sysfs attributes exported by this driver; debug_level only exists
 * in debug builds.  NULL-terminated as required by sysfs. */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};
4364
/* Attribute group registered against the PCI device. */
static struct attribute_group il_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il_sysfs_entries,
};
4369
4370/******************************************************************************
4371 *
4372 * uCode download functions
4373 *
4374 ******************************************************************************/
4375
/* Release all PCI-coherent buffers that hold the downloaded uCode images
 * (runtime code/data + backup, init code/data, bootstrap). */
static void
il4965_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
4386
static void
il4965_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
4393
4394static void il4965_ucode_callback(const struct firmware *ucode_raw,
4395 void *context);
4396static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
4397
4398static int __must_check
4399il4965_request_firmware(struct il_priv *il, bool first)
4400{
4401 const char *name_pre = il->cfg->fw_name_pre;
4402 char tag[8];
4403
4404 if (first) {
4405 il->fw_idx = il->cfg->ucode_api_max;
4406 sprintf(tag, "%d", il->fw_idx);
4407 } else {
4408 il->fw_idx--;
4409 sprintf(tag, "%d", il->fw_idx);
4410 }
4411
4412 if (il->fw_idx < il->cfg->ucode_api_min) {
4413 IL_ERR("no suitable firmware found!\n");
4414 return -ENOENT;
4415 }
4416
4417 sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
4418
4419 D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
4420
4421 return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
4422 &il->pci_dev->dev, GFP_KERNEL, il,
4423 il4965_ucode_callback);
4424}
4425
/* Pointers into (and sizes of) the five images carried by a .ucode file:
 * runtime inst/data, init inst/data, and the bootstrap image. */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
4430
/* Parse the raw .ucode file: read the header, validate the total size,
 * and fill @pieces with pointers into @ucode_raw for each image.
 * Returns 0 on success or -EINVAL for a malformed file. */
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	/* API versions 0-2 (and anything unrecognized, via the default
	 * label falling through) all share the 24-byte v1 header layout. */
	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* The five images follow the header back to back, in this order. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
4484
/**
 * il4965_ucode_callback - callback when firmware was loaded
 *
 * If loaded successfully, copies the firmware into buffers
 * for the card to fetch (via DMA).
 *
 * Runs asynchronously from request_firmware_nowait().  On any failure
 * before the PCI buffers are committed, falls back to requesting the
 * next-lower API version via il4965_request_firmware() ("try_again");
 * once all retries are exhausted the driver unbinds from the device
 * ("out_unbind").
 */
static void
il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct il_priv *il = context;
	struct il_ucode_header *ucode;
	int err;
	struct il4965_firmware_pieces pieces;
	const unsigned int api_max = il->cfg->ucode_api_max;
	const unsigned int api_min = il->cfg->ucode_api_min;
	u32 api_ver;

	/* Fixed probe-request capacity for this hardware generation,
	 * passed down to mac80211 setup below. */
	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
	    IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	memset(&pieces, 0, sizeof(pieces));

	/* NULL means request_firmware() itself failed; try the next
	 * (lower-API) firmware file, if any remain. */
	if (!ucode_raw) {
		if (il->fw_idx <= il->cfg->ucode_api_max)
			IL_ERR("request for firmware file '%s' failed.\n",
			       il->firmware_name);
		goto try_again;
	}

	D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
	       ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IL_ERR("File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file: header followed by uCode images */
	ucode = (struct il_ucode_header *)ucode_raw->data;

	/* Parse header, validate total size and locate the five image
	 * pieces (inst/data/init/init_data/boot) inside ucode_raw. */
	err = il4965_load_firmware(il, ucode_raw, &pieces);

	if (err)
		goto try_again;

	api_ver = IL_UCODE_API(il->ucode_ver);

	/*
	 * api_ver should match the api version forming part of the
	 * firmware filename ... but we don't check for that and only rely
	 * on the API version read from firmware header from here on forward
	 */
	if (api_ver < api_min || api_ver > api_max) {
		IL_ERR("Driver unable to support your firmware API. "
		       "Driver supports v%u, firmware is v%u.\n", api_max,
		       api_ver);
		goto try_again;
	}

	if (api_ver != api_max)
		IL_ERR("Firmware has old API version. Expected v%u, "
		       "got v%u. New firmware can be obtained "
		       "from http://www.intellinuxwireless.org.\n", api_max,
		       api_ver);

	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));

	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
		 IL_UCODE_SERIAL(il->ucode_ver));

	/*
	 * For any of the failures below (before allocating pci memory)
	 * we will try to load a version with a smaller API -- maybe the
	 * user just got a corrupted version of the latest API.
	 */

	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
	D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
	D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
	D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
	D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
	D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);

	/* Verify that uCode images will fit in card's SRAM */
	if (pieces.inst_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode instr len %Zd too large to fit in\n",
		       pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode data len %Zd too large to fit in\n",
		       pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > il->hw_params.max_inst_size) {
		IL_ERR("uCode init instr len %Zd too large to fit in\n",
		       pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > il->hw_params.max_data_size) {
		IL_ERR("uCode init data len %Zd too large to fit in\n",
		       pieces.init_data_size);
		goto try_again;
	}

	if (pieces.boot_size > il->hw_params.max_bsm_size) {
		IL_ERR("uCode boot instr len %Zd too large to fit in\n",
		       pieces.boot_size);
		goto try_again;
	}

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	il->ucode_code.len = pieces.inst_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);

	il->ucode_data.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);

	il->ucode_data_backup.len = pieces.data_size;
	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);

	/* il_alloc_fw_desc() leaves v_addr NULL on failure; check all
	 * three before touching any of them. */
	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
	    !il->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Initialization instructions and data */
	if (pieces.init_size && pieces.init_data_size) {
		il->ucode_init.len = pieces.init_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);

		il->ucode_init_data.len = pieces.init_data_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);

		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
			goto err_pci_alloc;
	}

	/* Bootstrap (instructions only, no data) */
	if (pieces.boot_size) {
		il->ucode_boot.len = pieces.boot_size;
		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);

		if (!il->ucode_boot.v_addr)
			goto err_pci_alloc;
	}

	/* Now that we can no longer fail, copy information */

	il->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy images into buffers for card's bus-master reads ... */

	/* Runtime instructions (first block of data in file) */
	D_INFO("Copying (but not loading) uCode instr len %Zd\n",
	       pieces.inst_size);
	memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);

	/*
	 * Runtime data
	 * NOTE: Copy into backup buffer will be done in il_up()
	 */
	D_INFO("Copying (but not loading) uCode data len %Zd\n",
	       pieces.data_size);
	memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	/* Initialization instructions */
	if (pieces.init_size) {
		D_INFO("Copying (but not loading) init instr len %Zd\n",
		       pieces.init_size);
		memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
	}

	/* Initialization data */
	if (pieces.init_data_size) {
		D_INFO("Copying (but not loading) init data len %Zd\n",
		       pieces.init_data_size);
		memcpy(il->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);
	}

	/* Bootstrap instructions */
	D_INFO("Copying (but not loading) boot instr len %Zd\n",
	       pieces.boot_size);
	memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/*
	 * figure out the offset of chain noise reset and gain commands
	 * base on the size of standard phy calibration commands table size
	 */
	il->_4965.phy_calib_chain_noise_reset_cmd =
	    standard_phy_calibration_size;
	il->_4965.phy_calib_chain_noise_gain_cmd =
	    standard_phy_calibration_size + 1;

	/**************************************************
	 * This is still part of probe() in a sense...
	 *
	 * 9. Setup and register with mac80211 and debugfs
	 **************************************************/
	err = il4965_mac_setup_register(il, max_probe_length);
	if (err)
		goto out_unbind;

	/* debugfs failure is non-fatal: log and continue */
	err = il_dbgfs_register(il, DRV_NAME);
	if (err)
		IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
		       err);

	err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
	if (err) {
		IL_ERR("failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);
	complete(&il->_4965.firmware_loading_complete);
	return;

try_again:
	/* try next, if any */
	if (il4965_request_firmware(il, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

err_pci_alloc:
	IL_ERR("failed to allocate pci memory\n");
	il4965_dealloc_ucode_pci(il);
out_unbind:
	complete(&il->_4965.firmware_loading_complete);
	device_release_driver(&il->pci_dev->dev);
	release_firmware(ucode_raw);
}
4736
/* Human-readable names for uCode error codes reported in the error
 * event table; indexed directly by the error code (see
 * il4965_desc_lookup()). */
static const char *const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STBL",
	"FH49_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
4767
4768static struct {
4769 char *name;
4770 u8 num;
4771} advanced_lookup[] = {
4772 {
4773 "NMI_INTERRUPT_WDG", 0x34}, {
4774 "SYSASSERT", 0x35}, {
4775 "UCODE_VERSION_MISMATCH", 0x37}, {
4776 "BAD_COMMAND", 0x38}, {
4777 "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, {
4778 "FATAL_ERROR", 0x3D}, {
4779 "NMI_TRM_HW_ERR", 0x46}, {
4780 "NMI_INTERRUPT_TRM", 0x4C}, {
4781 "NMI_INTERRUPT_BREAK_POINT", 0x54}, {
4782 "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, {
4783 "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, {
4784 "NMI_INTERRUPT_HOST", 0x66}, {
4785 "NMI_INTERRUPT_ACTION_PT", 0x7C}, {
4786 "NMI_INTERRUPT_UNKNOWN", 0x84}, {
4787 "NMI_INTERRUPT_INST_ACTION_PT", 0x86}, {
4788"ADVANCED_SYSASSERT", 0},};
4789
4790static const char *
4791il4965_desc_lookup(u32 num)
4792{
4793 int i;
4794 int max = ARRAY_SIZE(desc_lookup_text);
4795
4796 if (num < max)
4797 return desc_lookup_text[num];
4798
4799 max = ARRAY_SIZE(advanced_lookup) - 1;
4800 for (i = 0; i < max; i++) {
4801 if (advanced_lookup[i].num == num)
4802 break;
4803 }
4804 return advanced_lookup[i].name;
4805}
4806
/* Layout of the uCode error log in target memory: a one-u32 count
 * header followed by fixed-size seven-u32 records. */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4809
/*
 * il4965_dump_nic_error_log - read and print the uCode error log
 *
 * Reads the error event table from device SRAM (address reported in
 * the ALIVE response of whichever image — init or runtime — is
 * loaded) and prints a decoded summary via IL_ERR.
 */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* First word of the table is the number of logged entries */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	/* Fields of the first log entry live at fixed u32 offsets from
	 * the table base. */
	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc                                  Time       "
	       "data1      data2      line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc      blink1  blink2  ilink1  ilink2  hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
4857
4858static void
4859il4965_rf_kill_ct_config(struct il_priv *il)
4860{
4861 struct il_ct_kill_config cmd;
4862 unsigned long flags;
4863 int ret = 0;
4864
4865 spin_lock_irqsave(&il->lock, flags);
4866 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
4867 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
4868 spin_unlock_irqrestore(&il->lock, flags);
4869
4870 cmd.critical_temperature_R =
4871 cpu_to_le32(il->hw_params.ct_kill_threshold);
4872
4873 ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
4874 if (ret)
4875 IL_ERR("C_CT_KILL_CONFIG failed\n");
4876 else
4877 D_INFO("C_CT_KILL_CONFIG " "succeeded, "
4878 "critical temperature is %d\n",
4879 il->hw_params.ct_kill_threshold);
4880}
4881
/* Default Tx-queue -> Tx-FIFO mapping applied in il4965_alive_notify();
 * index is the queue number, value the FIFO (IL_TX_FIFO_UNUSED entries
 * are activated but not bound to a FIFO). */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};
4891
/* Build a contiguous bit mask covering bits lo..hi inclusive */
#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
4893
/*
 * il4965_alive_notify - set up the Tx scheduler after uCode is ALIVE
 *
 * Clears the scheduler's SRAM context, points the device at the byte
 * count tables, enables the Tx DMA channels and maps every Tx/cmd
 * queue to its FIFO per default_queue_to_tx_fifo[].  The whole
 * sequence runs under il->lock.  Returns 0.
 */
static int
il4965_alive_notify(struct il_priv *il)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&il->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (;
	     a <
	     il->scd_base_addr +
	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
	     a += 4)
		il_write_targ_mem(il, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < il->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write idxes */
		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				  (SCD_WIN_SIZE <<
				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				  sizeof(u32),
				  (SCD_FRAME_LIMIT <<
				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
		   (1 << il->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	il4965_txq_set_sched(il, IL_MASK(0, 6));

	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&il->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	il->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		il_txq_ctx_activate(il, i);

		/* Unused queues are activated but get no FIFO binding */
		if (ac == IL_TX_FIFO_UNUSED)
			continue;

		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
4993
/**
 * il4965_alive_start - called after N_ALIVE notification received
 * from protocol/runtime uCode (initialization uCode's
 * Alive gets handled by il_init_alive_start()).
 *
 * Verifies the runtime image, programs the Tx scheduler, marks the
 * driver ALIVE/READY, restores (or initializes) the RXON
 * configuration and finishes bring-up (BT config, calibration reset,
 * CT-kill, power mode).  Any failure queues the restart work.
 */
static void
il4965_alive_start(struct il_priv *il)
{
	int ret = 0;
	struct il_rxon_context *ctx = &il->ctx;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded.  */
	if (il4965_verify_ucode(il)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	/* Program the Tx scheduler / FIFO mapping */
	ret = il4965_alive_notify(il);
	if (ret) {
		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(S_ALIVE, &il->status);

	/* Enable watchdog to monitor the driver tx queues */
	il_setup_watchdog(il);

	/* With rfkill asserted, stop here; S_READY stays clear */
	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK;

	if (il_is_associated_ctx(ctx)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&ctx->active;
		/* apply any changes in staging */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		il_connection_init_rx_config(il, &il->ctx);

		if (il->cfg->ops->hcmd->set_rxon_chain)
			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
	}

	/* Configure bluetooth coexistence if enabled */
	il_send_bt_config(il);

	il4965_reset_run_time_calib(il);

	set_bit(S_READY, &il->status);

	/* Configure the adapter for unassociated operation */
	il_commit_rxon(il, ctx);

	/* At this point, the NIC is initialized and operational */
	il4965_rf_kill_ct_config(il);

	D_INFO("ALIVE processing complete.\n");
	/* Release anyone blocked in il4965_mac_start() on S_READY */
	wake_up(&il->wait_command_queue);

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}
5081
5082static void il4965_cancel_deferred_work(struct il_priv *il);
5083
/*
 * __il4965_down - take the NIC all the way down
 *
 * Caller must hold il->mutex.  Cancels scans, stops the watchdog,
 * clears station tables, resets the on-board processor, masks
 * interrupts and powers the device down.  Preserves only the RF-kill
 * (and, when already initialized, GEO/FW-error/exit-pending) status
 * bits across the teardown.
 */
static void
__il4965_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	/* Remember whether a shutdown was already pending so we only
	 * clear S_EXIT_PENDING below if we set it ourselves. */
	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&il->watchdog);

	il_clear_ucode_stations(il, NULL);
	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock any waiting calls */
	wake_up_all(&il->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* stop and reset the on-board processor */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il4965_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If we have not previously called il_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RF_KILL_HW,
			     &il->
			     status) << S_RF_KILL_HW |
		    test_bit(S_GEO_CONFIGURED,
			     &il->
			     status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	il->status &=
	    test_bit(S_RF_KILL_HW,
		     &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED,
							     &il->
							     status) <<
	    S_GEO_CONFIGURED | test_bit(S_FW_ERROR,
					&il->
					status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	il4965_txq_ctx_stop(il);
	il4965_rxq_stop(il);

	/* Power-down device's busmaster DMA clocks */
	il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Make sure (redundant) we've released our request to stay awake */
	il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	il_apm_stop(il);

exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	/* dev_kfree_skb(NULL) is a no-op, so no guard is needed */
	dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* clear out any free frames */
	il4965_clear_free_frames(il);
}
5172
/*
 * il4965_down - locked wrapper around __il4965_down()
 *
 * Deferred work is cancelled only after il->mutex is released; the
 * background workers (il4965_bg_*) take il->mutex themselves, so
 * cancelling while still holding it could deadlock.
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
5182
5183#define HW_READY_TIMEOUT (50)
5184
5185static int
5186il4965_set_hw_ready(struct il_priv *il)
5187{
5188 int ret = 0;
5189
5190 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
5191 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
5192
5193 /* See if we got it */
5194 ret =
5195 _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5196 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5197 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT);
5198 if (ret != -ETIMEDOUT)
5199 il->hw_ready = true;
5200 else
5201 il->hw_ready = false;
5202
5203 D_INFO("hardware %s\n", (il->hw_ready == 1) ? "ready" : "not ready");
5204 return ret;
5205}
5206
/*
 * il4965_prepare_card_hw - bring the NIC to the "ready" state
 *
 * Tries the ready handshake once; if the hardware is not yet ready,
 * asserts the PREPARE bit, waits (up to ~150 ms) for PREPARE_DONE and
 * retries the handshake.  The result is cached in il->hw_ready.
 */
static int
il4965_prepare_card_hw(struct il_priv *il)
{
	int ret = 0;

	D_INFO("il4965_prepare_card_hw enter\n");

	ret = il4965_set_hw_ready(il);
	if (il->hw_ready)
		return ret;

	/* If HW is not ready, prepare the conditions to check again */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);

	ret =
	    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
			 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	/* HW should be ready by now, check again. */
	if (ret != -ETIMEDOUT)
		il4965_set_hw_ready(il);

	return ret;
}
5232
/* How many times to retry loading the bootstrap uCode before giving up */
#define MAX_HW_RESTARTS 5

/*
 * __il4965_up - bring the NIC up and start loading uCode
 *
 * Caller must hold il->mutex.  Verifies preconditions (no pending
 * exit, ucode present), allocates the broadcast station, checks
 * hardware readiness and rfkill, initializes the NIC, then tries up
 * to MAX_HW_RESTARTS times to load the bootstrap uCode and start the
 * card.  Returns 0 on success (including the rfkill-asserted case) or
 * a negative errno.
 */
static int
__il4965_up(struct il_priv *il)
{
	int i;
	int ret;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bringup\n");
		return -EIO;
	}

	ret = il4965_alloc_bcast_station(il, &il->ctx);
	if (ret) {
		il_dealloc_bcast_stations(il);
		return ret;
	}

	il4965_prepare_card_hw(il);

	if (!il->hw_ready) {
		IL_WARN("Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RF_KILL_HW, &il->status);
	else
		set_bit(S_RF_KILL_HW, &il->status);

	/* With rfkill asserted we stop here and report success; the
	 * radio comes up later when rfkill is released. */
	if (il_is_rfkill(il)) {
		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);

		il_enable_interrupts(il);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	/* Ack/clear any stale interrupts */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before il_hw_nic_init */
	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;

	ret = il4965_hw_nic_init(il);
	if (ret) {
		IL_ERR("Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* really make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = il->cfg->ops->lib->load_ucode(il);

		if (ret) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		il4965_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* All retries failed: tear everything back down, preserving
	 * any pre-existing exit-pending state. */
	set_bit(S_EXIT_PENDING, &il->status);
	__il4965_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
5336
5337/*****************************************************************************
5338 *
5339 * Workqueue callbacks
5340 *
5341 *****************************************************************************/
5342
5343static void
5344il4965_bg_init_alive_start(struct work_struct *data)
5345{
5346 struct il_priv *il =
5347 container_of(data, struct il_priv, init_alive_start.work);
5348
5349 mutex_lock(&il->mutex);
5350 if (test_bit(S_EXIT_PENDING, &il->status))
5351 goto out;
5352
5353 il->cfg->ops->lib->init_alive_start(il);
5354out:
5355 mutex_unlock(&il->mutex);
5356}
5357
5358static void
5359il4965_bg_alive_start(struct work_struct *data)
5360{
5361 struct il_priv *il =
5362 container_of(data, struct il_priv, alive_start.work);
5363
5364 mutex_lock(&il->mutex);
5365 if (test_bit(S_EXIT_PENDING, &il->status))
5366 goto out;
5367
5368 il4965_alive_start(il);
5369out:
5370 mutex_unlock(&il->mutex);
5371}
5372
5373static void
5374il4965_bg_run_time_calib_work(struct work_struct *work)
5375{
5376 struct il_priv *il = container_of(work, struct il_priv,
5377 run_time_calib_work);
5378
5379 mutex_lock(&il->mutex);
5380
5381 if (test_bit(S_EXIT_PENDING, &il->status) ||
5382 test_bit(S_SCANNING, &il->status)) {
5383 mutex_unlock(&il->mutex);
5384 return;
5385 }
5386
5387 if (il->start_calib) {
5388 il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
5389 il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
5390 }
5391
5392 mutex_unlock(&il->mutex);
5393}
5394
/*
 * il4965_bg_restart - restart the device after a firmware error or an
 * explicit restart request
 *
 * On firmware error: tear down under the mutex, cancel deferred work
 * after dropping it, then let mac80211 drive the restart via
 * ieee80211_restart_hw().  Otherwise: full down/up cycle in place.
 */
static void
il4965_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		il->ctx.vif = NULL;
		il->is_open = 0;

		__il4965_down(il);

		mutex_unlock(&il->mutex);
		/* cancel work only after the mutex is dropped -- the
		 * workers take il->mutex themselves */
		il4965_cancel_deferred_work(il);
		ieee80211_restart_hw(il->hw);
	} else {
		il4965_down(il);

		/* re-check under the mutex: a shutdown may have raced
		 * with us while the device was down */
		mutex_lock(&il->mutex);
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il4965_up(il);
		mutex_unlock(&il->mutex);
	}
}
5426
5427static void
5428il4965_bg_rx_replenish(struct work_struct *data)
5429{
5430 struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
5431
5432 if (test_bit(S_EXIT_PENDING, &il->status))
5433 return;
5434
5435 mutex_lock(&il->mutex);
5436 il4965_rx_replenish(il);
5437 mutex_unlock(&il->mutex);
5438}
5439
5440/*****************************************************************************
5441 *
5442 * mac80211 entry point functions
5443 *
5444 *****************************************************************************/
5445
/* How long (in jiffies) il4965_mac_start() waits for the runtime
 * uCode's ALIVE handshake (S_READY) before giving up */
#define UCODE_READY_TIMEOUT	(4 * HZ)
5447
5448/*
5449 * Not a mac80211 entry point function, but it fits in with all the
5450 * other mac80211 functions grouped here.
5451 */
/*
 * Not a mac80211 entry point function, but it fits in with all the
 * other mac80211 functions grouped here.
 *
 * Advertises the device's capabilities (hw flags, wiphy flags,
 * supported bands, scan limits) and registers with mac80211.
 * @max_probe_length bounds the probe-request IEs we can append.
 * Returns 0 on success or the ieee80211_register_hw() error.
 */
static int
il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	hw->flags =
	    IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
	    IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
	    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/* SMPS only makes sense on 11n-capable SKUs */
	if (il->cfg->sku & IL_SKU_N)
		hw->flags |=
		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
		    IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct il_station_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	hw->wiphy->interface_modes |= il->ctx.interface_modes;
	hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;

	hw->wiphy->flags |=
	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;

	/* Only advertise bands that actually have channels */
	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		    &il->bands[IEEE80211_BAND_2GHZ];
	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		    &il->bands[IEEE80211_BAND_5GHZ];

	/* LEDs must be set up before registration exposes them */
	il_leds_init(il);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
5513
/*
 * il4965_mac_start - mac80211 start callback
 *
 * Brings the NIC up under il->mutex, then (unless rfkill is asserted)
 * waits up to UCODE_READY_TIMEOUT for the runtime uCode's ALIVE
 * handshake (S_READY, signalled via wait_command_queue by
 * il4965_alive_start()).  Returns 0 on success, a negative errno on
 * bring-up failure or timeout.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	/* rfkill asserted: report success now; the radio will finish
	 * coming up when rfkill is released */
	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			/* NOTE(review): the device is left up and
			 * is_open stays 0 on this path -- presumably
			 * mac80211 tears down via stop(); confirm. */
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
5555
/*
 * il4965_mac_stop - mac80211 stop callback
 *
 * Takes the device down, flushes pending work, and re-enables the
 * rfkill interrupt so rfkill state changes are still reported while
 * the interface is down.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	/* make sure no deferred work is still running before returning
	 * to mac80211 */
	flush_workqueue(il->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
5579
5580void
5581il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
5582{
5583 struct il_priv *il = hw->priv;
5584
5585 D_MACDUMP("enter\n");
5586
5587 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
5588 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
5589
5590 if (il4965_tx_skb(il, skb))
5591 dev_kfree_skb_any(skb);
5592
5593 D_MACDUMP("leave\n");
5594}
5595
/*
 * mac80211 update_tkip_key callback: forward the new TKIP phase-1 key
 * material (per-IV32) for the given station to the device-specific helper.
 */
void
il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	struct il_priv *il = hw->priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;

	D_MAC80211("enter\n");

	il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32,
			       phase1key);

	D_MAC80211("leave\n");
}
5611
/*
 * mac80211 set_key callback: install or remove a hardware crypto key.
 *
 * Returns -EOPNOTSUPP when the module runs with software crypto,
 * -EINVAL for an unknown station or command, otherwise the result of
 * the uCode key operation.
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct il_rxon_context *ctx = vif_priv->ctx;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* NULL sta maps to the broadcast station entry (group keys). */
	sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	/* A running scan would race with the key host commands. */
	il_scan_cancel_timeout(il, 100);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !ctx->key_mapping_keys;
		else
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret =
			    il4965_set_default_wep_key(il, vif_priv->ctx, key);
		else
			ret =
			    il4965_set_dynamic_key(il, vif_priv->ctx, key,
						   sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, ctx, key);
		else
			ret = il4965_remove_dynamic_key(il, ctx, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
5682
/*
 * mac80211 ampdu_action callback: start/stop Rx and Tx aggregation
 * sessions for (sta, tid).
 *
 * Returns -EACCES on non-11n hardware. Actions not handled by the
 * switch fall through with the pre-initialized -EINVAL. Stop requests
 * report success while the driver is shutting down (S_EXIT_PENDING)
 * so mac80211 can tear its side down cleanly.
 */
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum ieee80211_ampdu_mlme_action action,
			struct ieee80211_sta *sta, u16 tid, u16 * ssn,
			u8 buf_size)
{
	struct il_priv *il = hw->priv;
	int ret = -EINVAL;

	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

	if (!(il->cfg->sku & IL_SKU_N))
		return -EACCES;

	mutex_lock(&il->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		D_HT("start Rx\n");
		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		D_HT("stop Rx\n");
		ret = il4965_sta_rx_agg_stop(il, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		D_HT("start Tx\n");
		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP:
		D_HT("stop Tx\n");
		ret = il4965_tx_agg_stop(il, vif, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = 0;
		break;
	}
	mutex_unlock(&il->mutex);

	return ret;
}
5728
/*
 * mac80211 sta_add callback: create a device station table entry for
 * the peer and initialize rate scaling for it.
 */
int
il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
	/* When our interface is a station, the peer being added is an AP;
	 * the flag describes the peer, not our interface. */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	sta_priv->common.sta_id = IL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret =
	    il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta,
				  &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il4965_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
5766
/*
 * mac80211 channel_switch callback: program the device to follow the
 * AP announced channel switch.
 *
 * Builds the new channel/HT configuration in the staging RXON under
 * il->lock, then issues the device-specific channel-switch command.
 * If that command fails, the pending state is rolled back and
 * mac80211 is told the switch did not complete.
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct il_ht_config *ht_conf = &il->current_ht_config;

	struct il_rxon_context *ctx = &il->ctx;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	/* Guard clauses: only switch when the device is usable, idle,
	 * associated, and actually supports the operation. */
	if (il_is_rfkill(il))
		goto out;

	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	if (!il_is_associated_ctx(ctx))
		goto out;

	if (!il->cfg->ops->lib->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* Already on the requested channel — nothing to do. */
	if (le16_to_cpu(ctx->active.channel) == ch)
		goto out;

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	ctx->ht.enabled = conf_is_ht(conf);
	if (ctx->ht.enabled) {
		if (conf_is_ht40_minus(conf)) {
			ctx->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
			ctx->ht.is_40mhz = true;
		} else if (conf_is_ht40_plus(conf)) {
			ctx->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
			ctx->ht.is_40mhz = true;
		} else {
			ctx->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_NONE;
			ctx->ht.is_40mhz = false;
		}
	} else
		ctx->ht.is_40mhz = false;

	/* Changing channel invalidates the staged RXON flags. */
	if ((le16_to_cpu(ctx->staging.channel) != ch))
		ctx->staging.flags = 0;

	il_set_rxon_channel(il, channel, ctx);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, ctx, channel->band, ctx->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);
	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
		/* Command failed: undo pending state and notify mac80211. */
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(ctx->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
5857
/*
 * mac80211 configure_filter callback: translate FIF_* filter flags into
 * RXON filter bits in the staging configuration.
 *
 * The change is not committed to the device here (a scan may be in
 * progress); it takes effect with the next RXON commit. On return
 * *total_flags is masked down to the filters this driver supports.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* Accumulate RXON bits to set (filter_or) or clear (filter_nand)
 * depending on whether the FIF_* bit is requested. */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	il->ctx.staging.filter_flags &= ~filter_nand;
	il->ctx.staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&il->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in il_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
5904
5905/*****************************************************************************
5906 *
5907 * driver setup and teardown
5908 *
5909 *****************************************************************************/
5910
/*
 * Deferred work: re-send the Tx power command after a temperature
 * change. Scheduled from the stats/temperature handling path.
 */
static void
il4965_bg_txpower_work(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv,
					  txpower_work);

	mutex_lock(&il->mutex);

	/* If a scan happened to start before we got here
	 * then just return; the stats notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status))
		goto out;

	/* Regardless of if we are associated, we must reconfigure the
	 * TX power since frames can be sent on non-radar channels while
	 * not associated */
	il->cfg->ops->lib->send_tx_power(il);

	/* Update last_temperature to keep is_calib_needed from running
	 * when it isn't needed... */
	il->last_temperature = il->temperature;
out:
	mutex_unlock(&il->mutex);
}
5938
/*
 * Set up the driver's workqueue, work items, timers and the interrupt
 * tasklet. Counterpart of il4965_cancel_deferred_work().
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	/* NOTE(review): create_singlethread_workqueue() can return NULL on
	 * allocation failure and the result is not checked here — a later
	 * queue_work() on a NULL workqueue would oops. TODO: confirm whether
	 * the probe path should verify il->workqueue. */
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	init_timer(&il->stats_periodic);
	il->stats_periodic.data = (unsigned long)il;
	il->stats_periodic.function = il4965_bg_stats_periodic;

	init_timer(&il->watchdog);
	il->watchdog.data = (unsigned long)il;
	il->watchdog.function = il_bg_watchdog;

	/* Cast adapts the tasklet's unsigned-long-argument signature. */
	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il4965_irq_tasklet,
		     (unsigned long)il);
}
5968
/*
 * Cancel all work items and timers created in
 * il4965_setup_deferred_work(). The workqueue itself is destroyed by
 * the callers (probe error path / remove).
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	/* Non-sync variant: alive_start may still be running here —
	 * NOTE(review): presumably safe because callers flush the
	 * workqueue afterwards; verify against the shutdown paths. */
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
5981
5982static void
5983il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
5984{
5985 int i;
5986
5987 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
5988 rates[i].bitrate = il_rates[i].ieee * 5;
5989 rates[i].hw_value = i; /* Rate scaling will work on idxes */
5990 rates[i].hw_value_short = i;
5991 rates[i].flags = 0;
5992 if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
5993 /*
5994 * If CCK != 1M then set short preamble rate flag.
5995 */
5996 rates[i].flags |=
5997 (il_rates[i].plcp ==
5998 RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
5999 }
6000 }
6001}
6002
/*
 * Point both the host write pointer and the scheduler read pointer of
 * Tx queue txq_id at idx.
 *
 * Acquire il->lock before calling this function !
 */
void
il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
{
	/* Low byte: index within the queue; high byte: queue id. */
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
}
6012
/*
 * Program the scheduler status register for one Tx queue: map it to a
 * Tx FIFO and select whether it operates in scheduler-retry (BA
 * aggregation) mode.
 */
void
il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
			   int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
		   (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		   (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		   IL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
	       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
6035
/*
 * Initialize driver-private state: locks, defaults, regulatory channel
 * map, geo (band/rate) structures and the hw rate table.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here has been released again (goto-cleanup pattern).
 * Counterpart of il4965_uninit_drv().
 */
static int
il4965_init_drv(struct il_priv *il)
{
	int ret;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = IEEE80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);

	il_init_scan_params(il);

	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il4965_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
6085
/*
 * Release resources allocated by il4965_init_drv() plus calibration
 * results and the scan command buffer.
 */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il4965_calib_free_results(il);
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);	/* kfree(NULL) is a no-op */
}
6094
/*
 * Read hardware/PCI revision identifiers into il; used for logging and
 * device-specific behavior.
 */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
6103
/*
 * Fill generic hardware parameters from module options, then delegate
 * to the device-specific set_hw_params hook.
 *
 * Returns the hook's result (0 on success, negative errno otherwise).
 */
static int
il4965_set_hw_params(struct il_priv *il)
{
	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	/* Larger Rx buffers when 8K A-MSDU reception is enabled. */
	if (il->cfg->mod_params->amsdu_size_8K)
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
	else
		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);

	il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;

	/* "11n_disable" module parameter strips 11n capability. */
	if (il->cfg->mod_params->disable_11n)
		il->cfg->sku &= ~IL_SKU_N;

	/* Device-specific setup */
	return il->cfg->ops->lib->set_hw_params(il);
}
6122
/* Map mac80211 access categories (VO, VI, BE, BK) to Tx FIFOs... */
static const u8 il4965_bss_ac_to_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
};

/* ...and to Tx queue numbers (identity mapping for the BSS context). */
static const u8 il4965_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
6133
6134static int
6135il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6136{
6137 int err = 0;
6138 struct il_priv *il;
6139 struct ieee80211_hw *hw;
6140 struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
6141 unsigned long flags;
6142 u16 pci_cmd;
6143
6144 /************************
6145 * 1. Allocating HW data
6146 ************************/
6147
6148 hw = il_alloc_all(cfg);
6149 if (!hw) {
6150 err = -ENOMEM;
6151 goto out;
6152 }
6153 il = hw->priv;
6154 /* At this point both hw and il are allocated. */
6155
6156 il->ctx.ctxid = 0;
6157
6158 il->ctx.always_active = true;
6159 il->ctx.is_active = true;
6160 il->ctx.rxon_cmd = C_RXON;
6161 il->ctx.rxon_timing_cmd = C_RXON_TIMING;
6162 il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
6163 il->ctx.qos_cmd = C_QOS_PARAM;
6164 il->ctx.ap_sta_id = IL_AP_ID;
6165 il->ctx.wep_key_cmd = C_WEPKEY;
6166 il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
6167 il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
6168 il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC);
6169 il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION);
6170 il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
6171 il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
6172 il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
6173 il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
6174
6175 SET_IEEE80211_DEV(hw, &pdev->dev);
6176
6177 D_INFO("*** LOAD DRIVER ***\n");
6178 il->cfg = cfg;
6179 il->pci_dev = pdev;
6180 il->inta_mask = CSR_INI_SET_MASK;
6181
6182 if (il_alloc_traffic_mem(il))
6183 IL_ERR("Not enough memory to generate traffic log\n");
6184
6185 /**************************
6186 * 2. Initializing PCI bus
6187 **************************/
6188 pci_disable_link_state(pdev,
6189 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6190 PCIE_LINK_STATE_CLKPM);
6191
6192 if (pci_enable_device(pdev)) {
6193 err = -ENODEV;
6194 goto out_ieee80211_free_hw;
6195 }
6196
6197 pci_set_master(pdev);
6198
6199 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
6200 if (!err)
6201 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
6202 if (err) {
6203 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6204 if (!err)
6205 err =
6206 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6207 /* both attempts failed: */
6208 if (err) {
6209 IL_WARN("No suitable DMA available.\n");
6210 goto out_pci_disable_device;
6211 }
6212 }
6213
6214 err = pci_request_regions(pdev, DRV_NAME);
6215 if (err)
6216 goto out_pci_disable_device;
6217
6218 pci_set_drvdata(pdev, il);
6219
6220 /***********************
6221 * 3. Read REV register
6222 ***********************/
6223 il->hw_base = pci_iomap(pdev, 0, 0);
6224 if (!il->hw_base) {
6225 err = -ENODEV;
6226 goto out_pci_release_regions;
6227 }
6228
6229 D_INFO("pci_resource_len = 0x%08llx\n",
6230 (unsigned long long)pci_resource_len(pdev, 0));
6231 D_INFO("pci_resource_base = %p\n", il->hw_base);
6232
6233 /* these spin locks will be used in apm_ops.init and EEPROM access
6234 * we should init now
6235 */
6236 spin_lock_init(&il->reg_lock);
6237 spin_lock_init(&il->lock);
6238
6239 /*
6240 * stop and reset the on-board processor just in case it is in a
6241 * strange state ... like being left stranded by a primary kernel
6242 * and this is now the kdump kernel trying to start up
6243 */
6244 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
6245
6246 il4965_hw_detect(il);
6247 IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);
6248
6249 /* We disable the RETRY_TIMEOUT register (0x41) to keep
6250 * PCI Tx retries from interfering with C3 CPU state */
6251 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
6252
6253 il4965_prepare_card_hw(il);
6254 if (!il->hw_ready) {
6255 IL_WARN("Failed, HW not ready\n");
6256 goto out_iounmap;
6257 }
6258
6259 /*****************
6260 * 4. Read EEPROM
6261 *****************/
6262 /* Read the EEPROM */
6263 err = il_eeprom_init(il);
6264 if (err) {
6265 IL_ERR("Unable to init EEPROM\n");
6266 goto out_iounmap;
6267 }
6268 err = il4965_eeprom_check_version(il);
6269 if (err)
6270 goto out_free_eeprom;
6271
6272 if (err)
6273 goto out_free_eeprom;
6274
6275 /* extract MAC Address */
6276 il4965_eeprom_get_mac(il, il->addresses[0].addr);
6277 D_INFO("MAC address: %pM\n", il->addresses[0].addr);
6278 il->hw->wiphy->addresses = il->addresses;
6279 il->hw->wiphy->n_addresses = 1;
6280
6281 /************************
6282 * 5. Setup HW constants
6283 ************************/
6284 if (il4965_set_hw_params(il)) {
6285 IL_ERR("failed to set hw parameters\n");
6286 goto out_free_eeprom;
6287 }
6288
6289 /*******************
6290 * 6. Setup il
6291 *******************/
6292
6293 err = il4965_init_drv(il);
6294 if (err)
6295 goto out_free_eeprom;
6296 /* At this point both hw and il are initialized. */
6297
6298 /********************
6299 * 7. Setup services
6300 ********************/
6301 spin_lock_irqsave(&il->lock, flags);
6302 il_disable_interrupts(il);
6303 spin_unlock_irqrestore(&il->lock, flags);
6304
6305 pci_enable_msi(il->pci_dev);
6306
6307 err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
6308 if (err) {
6309 IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
6310 goto out_disable_msi;
6311 }
6312
6313 il4965_setup_deferred_work(il);
6314 il4965_setup_handlers(il);
6315
6316 /*********************************************
6317 * 8. Enable interrupts and read RFKILL state
6318 *********************************************/
6319
6320 /* enable rfkill interrupt: hw bug w/a */
6321 pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
6322 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
6323 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
6324 pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
6325 }
6326
6327 il_enable_rfkill_int(il);
6328
6329 /* If platform's RF_KILL switch is NOT set to KILL */
6330 if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
6331 clear_bit(S_RF_KILL_HW, &il->status);
6332 else
6333 set_bit(S_RF_KILL_HW, &il->status);
6334
6335 wiphy_rfkill_set_hw_state(il->hw->wiphy,
6336 test_bit(S_RF_KILL_HW, &il->status));
6337
6338 il_power_initialize(il);
6339
6340 init_completion(&il->_4965.firmware_loading_complete);
6341
6342 err = il4965_request_firmware(il, true);
6343 if (err)
6344 goto out_destroy_workqueue;
6345
6346 return 0;
6347
6348out_destroy_workqueue:
6349 destroy_workqueue(il->workqueue);
6350 il->workqueue = NULL;
6351 free_irq(il->pci_dev->irq, il);
6352out_disable_msi:
6353 pci_disable_msi(il->pci_dev);
6354 il4965_uninit_drv(il);
6355out_free_eeprom:
6356 il_eeprom_free(il);
6357out_iounmap:
6358 pci_iounmap(pdev, il->hw_base);
6359out_pci_release_regions:
6360 pci_set_drvdata(pdev, NULL);
6361 pci_release_regions(pdev);
6362out_pci_disable_device:
6363 pci_disable_device(pdev);
6364out_ieee80211_free_hw:
6365 il_free_traffic_mem(il);
6366 ieee80211_free_hw(il->hw);
6367out:
6368 return err;
6369}
6370
/*
 * PCI remove: unregister from mac80211, stop the hardware, quiesce
 * IRQs/tasklets/workqueues, then release every resource acquired in
 * il4965_pci_probe(), in reverse order.
 */
static void __devexit
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* The firmware callback may still be running from probe. */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/* The ieee80211_unregister_hw() call will cause il_mac_stop()
	 * and il4965_down() to be called; since we are removing the
	 * device we need to set the S_EXIT_PENDING bit.
	 */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il4965_down(il);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with il4965_down(), but there are paths to
	 * run il4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running il4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/*netif_stop_queue(dev); */
	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw calls il_mac_stop, which flushes
	 * il->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	il_free_traffic_mem(il);

	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	pci_iounmap(pdev, il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	il4965_uninit_drv(il);

	/* dev_kfree_skb(NULL) is a no-op */
	dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
6451
/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 * must be called under il->lock and mac access
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
6461
6462/*****************************************************************************
6463 *
6464 * driver and module entry point
6465 *
6466 *****************************************************************************/
6467
/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}	/* sentinel — terminates the table */
};
MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
6475
/* PCI driver glue: binds the ID table to the probe/remove/PM hooks. */
static struct pci_driver il4965_driver = {
	.name = DRV_NAME,
	.id_table = il4965_hw_card_ids,
	.probe = il4965_pci_probe,
	.remove = __devexit_p(il4965_pci_remove),
	.driver.pm = IL_LEGACY_PM_OPS,
};
6483
6484static int __init
6485il4965_init(void)
6486{
6487
6488 int ret;
6489 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
6490 pr_info(DRV_COPYRIGHT "\n");
6491
6492 ret = il4965_rate_control_register();
6493 if (ret) {
6494 pr_err("Unable to register rate control algorithm: %d\n", ret);
6495 return ret;
6496 }
6497
6498 ret = pci_register_driver(&il4965_driver);
6499 if (ret) {
6500 pr_err("Unable to initialize PCI module\n");
6501 goto error_register;
6502 }
6503
6504 return ret;
6505
6506error_register:
6507 il4965_rate_control_unregister();
6508 return ret;
6509}
6510
/* Module exit: strict reverse order of il4965_init(). */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}

module_exit(il4965_exit);
module_init(il4965_init);
6520
#ifdef CONFIG_IWLEGACY_DEBUG
/* Runtime-adjustable debug message mask (writable via sysfs). */
module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* Load-time tunables; all read-only after module load (S_IRUGO). */
module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
		   S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
new file mode 100644
index 000000000000..467d0cb14ecd
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -0,0 +1,2860 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <net/mac80211.h>
31
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/delay.h>
35
36#include <linux/workqueue.h>
37
38#include "common.h"
39#include "4965.h"
40
#define IL4965_RS_NAME "iwl-4965-rs"

/* Retries at one antenna configuration before toggling to another */
#define NUM_TRY_BEFORE_ANT_TOGGLE 1
/* Try counts per rate entry when building the link quality table */
#define IL_NUMBER_TRY 1
#define IL_HT_NUMBER_TRY 3

#define RATE_MAX_WINDOW 62	/* # tx in history win */
#define RATE_MIN_FAILURE_TH 6	/* min failures to calc tpt */
#define RATE_MIN_SUCCESS_TH 8	/* min successes to calc tpt */

/* max allowed rate miss before sync LQ cmd */
#define IL_MISSED_RATE_MAX 15
/* max time to accum history, in jiffies (3 seconds) */
#define RATE_SCALE_FLUSH_INTVL (3*HZ)
55
/* Map an HT rate idx to the nearest legacy (CCK/OFDM) rate idx, used
 * when falling back from an HT mode to a legacy mode
 * (see il4965_rs_get_lower_rate()). */
static u8 rs_ht_to_legacy[] = {
	RATE_6M_IDX, RATE_6M_IDX,
	RATE_6M_IDX, RATE_6M_IDX,
	RATE_6M_IDX,
	RATE_6M_IDX, RATE_9M_IDX,
	RATE_12M_IDX, RATE_18M_IDX,
	RATE_24M_IDX, RATE_36M_IDX,
	RATE_48M_IDX, RATE_54M_IDX
};
65
/* Next antenna configuration to try, idxed by the current one.
 * ANT_NONE and ANT_ABC map to themselves (nothing else to toggle to). */
static const u8 ant_toggle_lookup[] = {
	/*ANT_NONE -> */ ANT_NONE,
	/*ANT_A -> */ ANT_B,
	/*ANT_B -> */ ANT_C,
	/*ANT_AB -> */ ANT_BC,
	/*ANT_C -> */ ANT_A,
	/*ANT_AC -> */ ANT_AB,
	/*ANT_BC -> */ ANT_AC,
	/*ANT_ABC -> */ ANT_ABC,
};
76
/*
 * Build one il_rates[] entry: PLCP codes (legacy, SISO HT, MIMO2 HT),
 * the IEEE rate value, then prev/next link idxs in the order of the
 * struct il_rate_info fields.
 * NOTE(review): the exact meaning of the ip/in vs rp/rn vs pp/np link
 * pairs follows the field order of struct il_rate_info declared in the
 * common header — confirm the mapping there.
 */
#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
	[RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
			      RATE_SISO_##s##M_PLCP, \
			      RATE_MIMO2_##s##M_PLCP,\
			      RATE_##r##M_IEEE, \
			      RATE_##ip##M_IDX, \
			      RATE_##in##M_IDX, \
			      RATE_##rp##M_IDX, \
			      RATE_##rn##M_IDX, \
			      RATE_##pp##M_IDX, \
			      RATE_##np##M_IDX }
88
89/*
90 * Parameter order:
91 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
92 *
93 * If there isn't a valid next or previous rate then INV is used which
94 * maps to RATE_INVALID
95 *
96 */
/* Master rate table: CCK rates first (1..11 Mbps), then OFDM (6..60).
 * Exported for use by the rest of the 4965 driver. */
const struct il_rate_info il_rates[RATE_COUNT] = {
	IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),	/* 1mbps */
	IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),		/* 2mbps */
	IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),	/*5.5mbps */
	IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),	/* 11mbps */
	IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),		/* 6mbps */
	IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),	/* 9mbps */
	IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),	/* 12mbps */
	IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),	/* 18mbps */
	IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),	/* 24mbps */
	IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),	/* 36mbps */
	IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),	/* 48mbps */
	IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
	IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
};
112
113static int
114il4965_hwrate_to_plcp_idx(u32 rate_n_flags)
115{
116 int idx = 0;
117
118 /* HT rate format */
119 if (rate_n_flags & RATE_MCS_HT_MSK) {
120 idx = (rate_n_flags & 0xff);
121
122 if (idx >= RATE_MIMO2_6M_PLCP)
123 idx = idx - RATE_MIMO2_6M_PLCP;
124
125 idx += IL_FIRST_OFDM_RATE;
126 /* skip 9M not supported in ht */
127 if (idx >= RATE_9M_IDX)
128 idx += 1;
129 if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE)
130 return idx;
131
132 /* legacy rate format, search for match in table */
133 } else {
134 for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++)
135 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
136 return idx;
137 }
138
139 return -1;
140}
141
/* Forward declarations: defined later in this file but needed by the
 * Tx-status handling path above their definitions. */
static void il4965_rs_rate_scale_perform(struct il_priv *il,
					 struct sk_buff *skb,
					 struct ieee80211_sta *sta,
					 struct il_lq_sta *lq_sta);
static void il4965_rs_fill_link_cmd(struct il_priv *il,
				    struct il_lq_sta *lq_sta, u32 rate_n_flags);
static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
				    bool force_search);

#ifdef CONFIG_MAC80211_DEBUGFS
static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
				    u32 *rate_n_flags, int idx);
#else
/* No-op stub when debugfs support is compiled out */
static void
il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
{
}
#endif
160
161/**
162 * The following tables contain the expected throughput metrics for all rates
163 *
164 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
165 *
166 * where invalid entries are zeros.
167 *
168 * CCK rates are only valid in legacy table and will only be used in G
169 * (2.4 GHz) band.
170 */
171
/* Legacy (CCK + OFDM) expected throughput; idx order matches il_rates */
static s32 expected_tpt_legacy[RATE_COUNT] = {
	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};

/* HT tables: rows are Normal / SGI / AGG / AGG+SGI; CCK columns are 0
 * because CCK is not valid in HT modes. */
static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202},	/* Norm */
	{0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210},	/* SGI */
	{0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381},	/* AGG */
	{0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413},	/* AGG+SGI */
};

static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257},	/* Norm */
	{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264},	/* SGI */
	{0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640},	/* AGG */
	{0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683},	/* AGG+SGI */
};

static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250},	/* Norm */
	{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256},	/* SGI */
	{0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619},	/* AGG */
	{0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660},	/* AGG+SGI */
};

static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289},	/* Norm */
	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293},	/* SGI */
	{0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922},	/* AGG */
	{0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966},	/* AGG+SGI */
};

/* Human-readable rate / modulation-coding strings, one per il_rates
 * entry; used for debugfs output. */
/* mbps, mcs */
static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = {
	{"1", "BPSK DSSS"},
	{"2", "QPSK DSSS"},
	{"5.5", "BPSK CCK"},
	{"11", "QPSK CCK"},
	{"6", "BPSK 1/2"},
	{"9", "BPSK 1/2"},
	{"12", "QPSK 1/2"},
	{"18", "QPSK 3/4"},
	{"24", "16QAM 1/2"},
	{"36", "16QAM 3/4"},
	{"48", "64QAM 2/3"},
	{"54", "64QAM 3/4"},
	{"60", "64QAM 5/6"},
};

/* 8 MCS entries per spatial stream */
#define MCS_IDX_PER_STREAM (8)
222
223static inline u8
224il4965_rs_extract_rate(u32 rate_n_flags)
225{
226 return (u8) (rate_n_flags & 0xFF);
227}
228
229static void
230il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
231{
232 win->data = 0;
233 win->success_counter = 0;
234 win->success_ratio = IL_INVALID_VALUE;
235 win->counter = 0;
236 win->average_tpt = IL_INVALID_VALUE;
237 win->stamp = 0;
238}
239
240static inline u8
241il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
242{
243 return (ant_type & valid_antenna) == ant_type;
244}
245
246/*
247 * removes the old data from the stats. All data that is older than
248 * TID_MAX_TIME_DIFF, will be deleted.
249 */
static void
il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)
{
	/* The oldest age we want to keep */
	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;

	/* Walk the ring buffer from the head, dropping whole time cells
	 * until the oldest remaining cell falls inside the window. Each
	 * dropped cell's packets are subtracted from the running total. */
	while (tl->queue_count && tl->time_stamp < oldest_time) {
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		tl->time_stamp += TID_QUEUE_CELL_SPACING;
		tl->queue_count--;
		tl->head++;
		if (tl->head >= TID_QUEUE_MAX_SIZE)
			tl->head = 0;
	}
}
266
267/*
268 * increment traffic load value for tid and also remove
269 * any old values if passed the certain time period
270 */
/*
 * Account one Tx packet in the per-TID traffic-load ring buffer.
 * Returns the packet's TID, or MAX_TID_COUNT when the frame carries no
 * usable TID (non-QoS data, out-of-range TID, or first packet which
 * only initializes the buffer).
 */
static u8
il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 idx;
	struct il_traffic_load *tl = NULL;
	u8 tid;

	/* Only QoS data frames carry a TID in the QoS control field */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	} else
		return MAX_TID_COUNT;

	if (unlikely(tid >= TID_MAX_LOAD_COUNT))
		return MAX_TID_COUNT;

	tl = &lq_data->load[tid];

	/* Quantize the timestamp to the ring buffer's cell granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* Happens only for the first packet. Initialize the data */
	if (!(tl->queue_count)) {
		tl->total = 1;
		tl->time_stamp = curr_time;
		tl->queue_count = 1;
		tl->head = 0;
		tl->packet_count[0] = 1;
		return MAX_TID_COUNT;
	}

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	idx = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (idx >= TID_QUEUE_MAX_SIZE)
		il4965_rs_tl_rm_old_stats(tl, curr_time);

	/* Count this packet in its time cell and in the running total */
	idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE;
	tl->packet_count[idx] = tl->packet_count[idx] + 1;
	tl->total = tl->total + 1;

	if ((idx + 1) > tl->queue_count)
		tl->queue_count = idx + 1;

	return tid;
}
320
321/*
322 get the traffic load value for tid
323*/
static u32
il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 idx;
	struct il_traffic_load *tl = NULL;

	if (tid >= TID_MAX_LOAD_COUNT)
		return 0;

	tl = &(lq_data->load[tid]);

	/* Quantize the timestamp to the ring buffer's cell granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* No traffic has been recorded for this TID yet */
	if (!(tl->queue_count))
		return 0;

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	idx = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (idx >= TID_QUEUE_MAX_SIZE)
		il4965_rs_tl_rm_old_stats(tl, curr_time);

	/* Total packets remaining inside the time window */
	return tl->total;
}
352
/*
 * Try to start a Tx BlockAck (aggregation) session for one TID, but
 * only when the recent traffic load justifies it. Returns the result
 * of ieee80211_start_tx_ba_session(), or -EAGAIN when the load was
 * below IL_AGG_LOAD_THRESHOLD.
 */
static int
il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data,
				 u8 tid, struct ieee80211_sta *sta)
{
	int ret = -EAGAIN;
	u32 load;

	load = il4965_rs_tl_get_load(lq_data, tid);

	if (load > IL_AGG_LOAD_THRESHOLD) {
		D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid);
		ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
		if (ret == -EAGAIN) {
			/*
			 * driver and mac80211 is out of sync
			 * this might be cause by reloading firmware
			 * stop the tx ba session here
			 */
			IL_ERR("Fail start Tx agg on tid: %d\n", tid);
			ieee80211_stop_tx_ba_session(sta, tid);
		}
	} else
		D_HT("Aggregation not enabled for tid %d because load = %u\n",
		     tid, load);

	return ret;
}
380
381static void
382il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data,
383 struct ieee80211_sta *sta)
384{
385 if (tid < TID_MAX_LOAD_COUNT)
386 il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta);
387 else
388 IL_ERR("tid exceeds max load count: %d/%d\n", tid,
389 TID_MAX_LOAD_COUNT);
390}
391
392static inline int
393il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags)
394{
395 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
396 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
397 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
398}
399
400/*
401 * Static function to get the expected throughput from an il_scale_tbl_info
402 * that wraps a NULL pointer check
403 */
404static s32
405il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
406{
407 if (tbl->expected_tpt)
408 return tbl->expected_tpt[rs_idx];
409 return 0;
410}
411
412/**
413 * il4965_rs_collect_tx_data - Update the success/failure sliding win
414 *
415 * We keep a sliding win of the last 62 packets transmitted
416 * at this rate. win->data contains the bitmask of successful
417 * packets.
418 */
static int
il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
			  int attempts, int successes)
{
	struct il_rate_scale_data *win = NULL;
	/* Bit of the oldest entry in the sliding window bitmap */
	static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1));
	s32 fail_count, tpt;

	if (scale_idx < 0 || scale_idx >= RATE_COUNT)
		return -EINVAL;

	/* Select win for current tx bit rate */
	win = &(tbl->win[scale_idx]);

	/* Get expected throughput */
	tpt = il4965_get_expected_tpt(tbl, scale_idx);

	/*
	 * Keep track of only the latest 62 tx frame attempts in this rate's
	 * history win; anything older isn't really relevant any more.
	 * If we have filled up the sliding win, drop the oldest attempt;
	 * if the oldest attempt (highest bit in bitmap) shows "success",
	 * subtract "1" from the success counter (this is the main reason
	 * we keep these bitmaps!).
	 */
	while (attempts > 0) {
		if (win->counter >= RATE_MAX_WINDOW) {

			/* remove earliest */
			win->counter = RATE_MAX_WINDOW - 1;

			if (win->data & mask) {
				win->data &= ~mask;
				win->success_counter--;
			}
		}

		/* Increment frames-attempted counter */
		win->counter++;

		/* Shift bitmap by one frame to throw away oldest history */
		win->data <<= 1;

		/* Mark the most recent #successes attempts as successful */
		if (successes > 0) {
			win->success_counter++;
			win->data |= 0x1;
			successes--;
		}

		attempts--;
	}

	/* Calculate current success ratio, avoid divide-by-0!
	 * Ratio is scaled by 128 to keep precision in integer math. */
	if (win->counter > 0)
		win->success_ratio =
		    128 * (100 * win->success_counter) / win->counter;
	else
		win->success_ratio = IL_INVALID_VALUE;

	fail_count = win->counter - win->success_counter;

	/* Calculate average throughput, if we have enough history.
	 * The +64 rounds to nearest when dividing the 128-scaled ratio. */
	if (fail_count >= RATE_MIN_FAILURE_TH ||
	    win->success_counter >= RATE_MIN_SUCCESS_TH)
		win->average_tpt = (win->success_ratio * tpt + 64) / 128;
	else
		win->average_tpt = IL_INVALID_VALUE;

	/* Tag this win as having been updated */
	win->stamp = jiffies;

	return 0;
}
493
494/*
495 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
496 */
static u32
il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
			     int idx, u8 use_green)
{
	u32 rate_n_flags = 0;

	/* Start with the PLCP code for the chosen modulation/rate */
	if (is_legacy(tbl->lq_type)) {
		rate_n_flags = il_rates[idx].plcp;
		if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE)
			rate_n_flags |= RATE_MCS_CCK_MSK;

	} else if (is_Ht(tbl->lq_type)) {
		if (idx > IL_LAST_OFDM_RATE) {
			IL_ERR("Invalid HT rate idx %d\n", idx);
			idx = IL_LAST_OFDM_RATE;
		}
		rate_n_flags = RATE_MCS_HT_MSK;

		if (is_siso(tbl->lq_type))
			rate_n_flags |= il_rates[idx].plcp_siso;
		else
			rate_n_flags |= il_rates[idx].plcp_mimo2;
	} else {
		IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
	}

	/* Encode the antenna selection */
	rate_n_flags |=
	    ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);

	/* HT-only modifiers: channel width, guard interval, green-field */
	if (is_Ht(tbl->lq_type)) {
		if (tbl->is_ht40) {
			if (tbl->is_dup)
				rate_n_flags |= RATE_MCS_DUP_MSK;
			else
				rate_n_flags |= RATE_MCS_HT40_MSK;
		}
		if (tbl->is_SGI)
			rate_n_flags |= RATE_MCS_SGI_MSK;

		if (use_green) {
			rate_n_flags |= RATE_MCS_GF_MSK;
			/* GF+SGI is not a valid SISO combination; drop SGI */
			if (is_siso(tbl->lq_type) && tbl->is_SGI) {
				rate_n_flags &= ~RATE_MCS_SGI_MSK;
				IL_ERR("GF was set with SGI:SISO\n");
			}
		}
	}
	return rate_n_flags;
}
546
547/*
548 * Interpret uCode API's rate_n_flags format,
549 * fill "search" or "active" tx mode table.
550 */
static int
il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
				enum ieee80211_band band,
				struct il_scale_tbl_info *tbl, int *rate_idx)
{
	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
	u8 il4965_num_of_ant =
	    il4965_get_il4965_num_of_ant_from_rate(rate_n_flags);
	u8 mcs;

	memset(tbl, 0, sizeof(struct il_scale_tbl_info));
	*rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags);

	if (*rate_idx == RATE_INVALID) {
		*rate_idx = -1;
		return -EINVAL;
	}
	tbl->is_SGI = 0;	/* default legacy setup */
	tbl->is_ht40 = 0;
	tbl->is_dup = 0;
	tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
	tbl->lq_type = LQ_NONE;
	tbl->max_search = IL_MAX_SEARCH;

	/* legacy rate format */
	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
		/* Legacy with one antenna: A-band (OFDM-only) or G-band.
		 * Other antenna counts leave lq_type as LQ_NONE. */
		if (il4965_num_of_ant == 1) {
			if (band == IEEE80211_BAND_5GHZ)
				tbl->lq_type = LQ_A;
			else
				tbl->lq_type = LQ_G;
		}
		/* HT rate format */
	} else {
		if (rate_n_flags & RATE_MCS_SGI_MSK)
			tbl->is_SGI = 1;

		if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
		    (rate_n_flags & RATE_MCS_DUP_MSK))
			tbl->is_ht40 = 1;

		if (rate_n_flags & RATE_MCS_DUP_MSK)
			tbl->is_dup = 1;

		mcs = il4965_rs_extract_rate(rate_n_flags);

		/* SISO */
		if (mcs <= RATE_SISO_60M_PLCP) {
			if (il4965_num_of_ant == 1)
				tbl->lq_type = LQ_SISO;	/*else NONE */
			/* MIMO2 */
		} else {
			if (il4965_num_of_ant == 2)
				tbl->lq_type = LQ_MIMO2;
		}
	}
	return 0;
}
609
610/* switch to another antenna/antennas and return 1 */
611/* if no other valid antenna found, return 0 */
static int
il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
			 struct il_scale_tbl_info *tbl)
{
	u8 new_ant_type;

	/* Nothing to toggle if the current antenna setting is invalid */
	if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
		return 0;

	if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
		return 0;

	/* Follow the toggle chain until we hit a configuration that the
	 * hardware supports, or come back to where we started. */
	new_ant_type = ant_toggle_lookup[tbl->ant_type];

	while (new_ant_type != tbl->ant_type &&
	       !il4965_rs_is_valid_ant(valid_ant, new_ant_type))
		new_ant_type = ant_toggle_lookup[new_ant_type];

	if (new_ant_type == tbl->ant_type)
		return 0;

	/* Commit the new antenna choice into both the table and the
	 * uCode rate_n_flags encoding. */
	tbl->ant_type = new_ant_type;
	*rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
	*rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
	return 1;
}
638
639/**
640 * Green-field mode is valid if the station supports it and
641 * there are no non-GF stations present in the BSS.
642 */
643static bool
644il4965_rs_use_green(struct ieee80211_sta *sta)
645{
646 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
647 struct il_rxon_context *ctx = sta_priv->common.ctx;
648
649 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
650 !(ctx->ht.non_gf_sta_present);
651}
652
653/**
654 * il4965_rs_get_supported_rates - get the available rates
655 *
656 * if management frame or broadcast frame only return
657 * basic available rates.
658 *
659 */
660static u16
661il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta,
662 struct ieee80211_hdr *hdr,
663 enum il_table_type rate_type)
664{
665 if (is_legacy(rate_type)) {
666 return lq_sta->active_legacy_rate;
667 } else {
668 if (is_siso(rate_type))
669 return lq_sta->active_siso_rate;
670 else
671 return lq_sta->active_mimo2_rate;
672 }
673}
674
675static u16
676il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask,
677 int rate_type)
678{
679 u8 high = RATE_INVALID;
680 u8 low = RATE_INVALID;
681
682 /* 802.11A or ht walks to the next literal adjacent rate in
683 * the rate table */
684 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
685 int i;
686 u32 mask;
687
688 /* Find the previous rate that is in the rate mask */
689 i = idx - 1;
690 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
691 if (rate_mask & mask) {
692 low = i;
693 break;
694 }
695 }
696
697 /* Find the next rate that is in the rate mask */
698 i = idx + 1;
699 for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) {
700 if (rate_mask & mask) {
701 high = i;
702 break;
703 }
704 }
705
706 return (high << 8) | low;
707 }
708
709 low = idx;
710 while (low != RATE_INVALID) {
711 low = il_rates[low].prev_rs;
712 if (low == RATE_INVALID)
713 break;
714 if (rate_mask & (1 << low))
715 break;
716 D_RATE("Skipping masked lower rate: %d\n", low);
717 }
718
719 high = idx;
720 while (high != RATE_INVALID) {
721 high = il_rates[high].next_rs;
722 if (high == RATE_INVALID)
723 break;
724 if (rate_mask & (1 << high))
725 break;
726 D_RATE("Skipping masked higher rate: %d\n", high);
727 }
728
729 return (high << 8) | low;
730}
731
/*
 * Pick the next lower rate (possibly falling back from HT to legacy)
 * and return it encoded as a uCode rate_n_flags value. May modify tbl
 * in place when an HT -> legacy switch is performed.
 */
static u32
il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
			 struct il_scale_tbl_info *tbl, u8 scale_idx,
			 u8 ht_possible)
{
	s32 low;
	u16 rate_mask;
	u16 high_low;
	u8 switch_to_legacy = 0;
	u8 is_green = lq_sta->is_green;
	struct il_priv *il = lq_sta->drv;

	/* check if we need to switch from HT to legacy rates.
	 * assumption is that mandatory rates (1Mbps or 6Mbps)
	 * are always supported (spec demand) */
	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
		switch_to_legacy = 1;
		scale_idx = rs_ht_to_legacy[scale_idx];
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			tbl->lq_type = LQ_A;
		else
			tbl->lq_type = LQ_G;

		/* Legacy Tx uses a single antenna */
		if (il4965_num_of_ant(tbl->ant_type) > 1)
			tbl->ant_type =
			    il4965_first_antenna(il->hw_params.valid_tx_ant);

		tbl->is_ht40 = 0;
		tbl->is_SGI = 0;
		tbl->max_search = IL_MAX_SEARCH;
	}

	rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);

	/* Mask with station rate restriction */
	if (is_legacy(tbl->lq_type)) {
		/* supp_rates has no CCK bits in A mode */
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			rate_mask =
			    (u16) (rate_mask &
				   (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
		else
			rate_mask = (u16) (rate_mask & lq_sta->supp_rates);
	}

	/* If we switched from HT to legacy, check current rate */
	if (switch_to_legacy && (rate_mask & (1 << scale_idx))) {
		low = scale_idx;
		goto out;
	}

	high_low =
	    il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask,
					tbl->lq_type);
	low = high_low & 0xff;

	/* No lower rate available: stay at the current one */
	if (low == RATE_INVALID)
		low = scale_idx;

out:
	return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
}
794
795/*
796 * Simple function to compare two rate scale table types
797 */
798static bool
799il4965_table_type_matches(struct il_scale_tbl_info *a,
800 struct il_scale_tbl_info *b)
801{
802 return (a->lq_type == b->lq_type && a->ant_type == b->ant_type &&
803 a->is_SGI == b->is_SGI);
804}
805
806/*
807 * mac80211 sends us Tx status
808 */
static void
il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
		    struct ieee80211_sta *sta, void *il_sta,
		    struct sk_buff *skb)
{
	int legacy_success;
	int retries;
	int rs_idx, mac_idx, i;
	struct il_lq_sta *lq_sta = il_sta;
	struct il_link_quality_cmd *table;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct il_priv *il = (struct il_priv *)il_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	enum mac80211_rate_control_flags mac_flags;
	u32 tx_rate;
	struct il_scale_tbl_info tbl_type;
	struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	struct il_rxon_context *ctx = sta_priv->common.ctx;

	D_RATE("get frame ack response, update rate scale win\n");

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (!lq_sta) {
		D_RATE("Station rate scaling not created yet.\n");
		return;
	} else if (!lq_sta->drv) {
		D_RATE("Rate scaling not initialized yet.\n");
		return;
	}

	/* Only acked data frames contribute to the rate statistics */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
		return;

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	/*
	 * Ignore this Tx frame response if its initial rate doesn't match
	 * that of latest Link Quality command. There may be stragglers
	 * from a previous Link Quality command, but we're no longer interested
	 * in those; they're either from the "active" mode while we're trying
	 * to check "search" mode, or a prior "search" mode after we've moved
	 * to a new "search" mode (which might become the new "active" mode).
	 */
	table = &lq_sta->lq;
	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
	il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
	/* In 5 GHz the rate table starts at the first OFDM rate */
	if (il->band == IEEE80211_BAND_5GHZ)
		rs_idx -= IL_FIRST_OFDM_RATE;
	mac_flags = info->status.rates[0].flags;
	mac_idx = info->status.rates[0].idx;
	/* For HT packets, map MCS to PLCP */
	if (mac_flags & IEEE80211_TX_RC_MCS) {
		mac_idx &= RATE_MCS_CODE_MSK;	/* Remove # of streams */
		if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE))
			mac_idx++;
		/*
		 * mac80211 HT idx is always zero-idxed; we need to move
		 * HT OFDM rates after CCK rates in 2.4 GHz band
		 */
		if (il->band == IEEE80211_BAND_2GHZ)
			mac_idx += IL_FIRST_OFDM_RATE;
	}
	/* Here we actually compare this rate to the latest LQ command */
	if (mac_idx < 0 ||
	    tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
	    tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
	    tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
	    tbl_type.ant_type != info->antenna_sel_tx ||
	    !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
	    || !!(tx_rate & RATE_MCS_GF_MSK) !=
	    !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
		D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx,
		       rs_idx, tx_rate);
		/*
		 * Since rates mis-match, the last LQ command may have failed.
		 * After IL_MISSED_RATE_MAX mis-matches, resync the uCode with
		 * ... driver.
		 */
		lq_sta->missed_rate_counter++;
		if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
			lq_sta->missed_rate_counter = 0;
			il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
		}
		/* Regardless, ignore this status info for outdated rate */
		return;
	} else
		/* Rate did match, so reset the missed_rate_counter */
		lq_sta->missed_rate_counter = 0;

	/* Figure out if rate scale algorithm is in active or search table */
	if (il4965_table_type_matches
	    (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) {
		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
	} else
	    if (il4965_table_type_matches
		(&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	} else {
		D_RATE("Neither active nor search matches tx rate\n");
		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
		       tmp_tbl->ant_type, tmp_tbl->is_SGI);
		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
		       tmp_tbl->ant_type, tmp_tbl->is_SGI);
		D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type,
		       tbl_type.ant_type, tbl_type.is_SGI);
		/*
		 * no matching table found, let's by-pass the data collection
		 * and continue to perform rate scale to find the rate table
		 */
		il4965_rs_stay_in_table(lq_sta, true);
		goto done;
	}

	/*
	 * Updating the frame history depends on whether packets were
	 * aggregated.
	 *
	 * For aggregation, all packets were transmitted at the same rate, the
	 * first idx into rate scale table.
	 */
	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
		il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type,
						&rs_idx);
		il4965_rs_collect_tx_data(curr_tbl, rs_idx,
					  info->status.ampdu_len,
					  info->status.ampdu_ack_len);

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->stay_in_tbl) {
			lq_sta->total_success += info->status.ampdu_ack_len;
			lq_sta->total_failed +=
			    (info->status.ampdu_len -
			     info->status.ampdu_ack_len);
		}
	} else {
		/*
		 * For legacy, update frame history with for each Tx retry.
		 */
		retries = info->status.rates[0].count - 1;
		/* HW doesn't send more than 15 retries */
		retries = min(retries, 15);

		/* The last transmission may have been successful */
		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
		/* Collect data for each rate used during failed TX attempts */
		for (i = 0; i <= retries; ++i) {
			tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
			il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band,
							&tbl_type, &rs_idx);
			/*
			 * Only collect stats if retried rate is in the same RS
			 * table as active/search.
			 */
			if (il4965_table_type_matches(&tbl_type, curr_tbl))
				tmp_tbl = curr_tbl;
			else if (il4965_table_type_matches
				 (&tbl_type, other_tbl))
				tmp_tbl = other_tbl;
			else
				continue;
			/* Only the final attempt may have succeeded */
			il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1,
						  i <
						  retries ? 0 : legacy_success);
		}

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->stay_in_tbl) {
			lq_sta->total_success += legacy_success;
			lq_sta->total_failed += retries + (1 - legacy_success);
		}
	}
	/* The last TX rate is cached in lq_sta; it's set in if/else above */
	lq_sta->last_rate_n_flags = tx_rate;
done:
	/* See if there's a better rate or modulation mode to try. */
	if (sta->supp_rates[sband->band])
		il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
}
997
998/*
999 * Begin a period of staying with a selected modulation mode.
1000 * Set "stay_in_tbl" flag to prevent any mode switches.
1001 * Set frame tx success limits according to legacy vs. high-throughput,
1002 * and reset overall (spanning all rates) tx success history stats.
1003 * These control how long we stay using same modulation mode before
1004 * searching for a new mode.
1005 */
static void
il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy,
			    struct il_lq_sta *lq_sta)
{
	D_RATE("we are staying in the same table\n");
	lq_sta->stay_in_tbl = 1;	/* only place this gets set */
	/* Legacy and HT modes use different persistence limits */
	if (is_legacy) {
		lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT;
		lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT;
		lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT;
	} else {
		lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT;
		lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT;
		lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT;
	}
	/* Restart the cross-rate success/failure history for this stay */
	lq_sta->table_count = 0;
	lq_sta->total_failed = 0;
	lq_sta->total_success = 0;
	lq_sta->flush_timer = jiffies;
	lq_sta->action_counter = 0;
}
1027
1028/*
1029 * Find correct throughput table for given mode of modulation
1030 */
1031static void
1032il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta,
1033 struct il_scale_tbl_info *tbl)
1034{
1035 /* Used to choose among HT tables */
1036 s32(*ht_tbl_pointer)[RATE_COUNT];
1037
1038 /* Check for invalid LQ type */
1039 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1040 tbl->expected_tpt = expected_tpt_legacy;
1041 return;
1042 }
1043
1044 /* Legacy rates have only one table */
1045 if (is_legacy(tbl->lq_type)) {
1046 tbl->expected_tpt = expected_tpt_legacy;
1047 return;
1048 }
1049
1050 /* Choose among many HT tables depending on number of streams
1051 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1052 * status */
1053 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1054 ht_tbl_pointer = expected_tpt_siso20MHz;
1055 else if (is_siso(tbl->lq_type))
1056 ht_tbl_pointer = expected_tpt_siso40MHz;
1057 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1058 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1059 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1060 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1061
1062 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1063 tbl->expected_tpt = ht_tbl_pointer[0];
1064 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1065 tbl->expected_tpt = ht_tbl_pointer[1];
1066 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1067 tbl->expected_tpt = ht_tbl_pointer[2];
1068 else /* AGG+SGI */
1069 tbl->expected_tpt = ht_tbl_pointer[3];
1070}
1071
1072/*
1073 * Find starting rate for new "search" high-throughput mode of modulation.
1074 * Goal is to find lowest expected rate (under perfect conditions) that is
1075 * above the current measured throughput of "active" mode, to give new mode
1076 * a fair chance to prove itself without too many challenges.
1077 *
1078 * This gets called when transitioning to more aggressive modulation
1079 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1080 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1081 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1082 * bit rate will typically need to increase, but not if performance was bad.
1083 */
static s32
il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta,
			struct il_scale_tbl_info *tbl,	/* "search" */
			u16 rate_mask, s8 idx)
{
	/* "active" values */
	struct il_scale_tbl_info *active_tbl =
	    &(lq_sta->lq_info[lq_sta->active_tbl]);
	s32 active_sr = active_tbl->win[idx].success_ratio;
	s32 active_tpt = active_tbl->expected_tpt[idx];

	/* expected "search" throughput */
	s32 *tpt_tbl = tbl->expected_tpt;

	s32 new_rate, high, low, start_hi;
	u16 high_low;
	s8 rate = idx;

	/* start_hi/new_rate double as direction markers: once one is set,
	 * reversing direction terminates the walk (see break cases below) */
	new_rate = high = low = start_hi = RATE_INVALID;

	for (;;) {
		/* Adjacent valid rates, packed: low in bits 0-7,
		 * high in bits 8-15 (RATE_INVALID when none exists) */
		high_low =
		    il4965_rs_get_adjacent_rate(il, rate, rate_mask,
						tbl->lq_type);

		low = high_low & 0xff;
		high = (high_low >> 8) & 0xff;

		/*
		 * Lower the "search" bit rate, to give new "search" mode
		 * approximately the same throughput as "active" if:
		 *
		 * 1) "Active" mode has been working modestly well (but not
		 *    great), and expected "search" throughput (under perfect
		 *    conditions) at candidate rate is above the actual
		 *    measured "active" throughput (but less than expected
		 *    "active" throughput under perfect conditions).
		 * OR
		 * 2) "Active" mode has been working perfectly or very well
		 *    and expected "search" throughput (under perfect
		 *    conditions) at candidate rate is above expected
		 *    "active" throughput (under perfect conditions).
		 */
		if ((100 * tpt_tbl[rate] > lq_sta->last_tpt &&
		     (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH
		      && tpt_tbl[rate] <= active_tpt)) ||
		    (active_sr >= RATE_SCALE_SWITCH &&
		     tpt_tbl[rate] > active_tpt)) {

			/* (2nd or later pass)
			 * If we've already tried to raise the rate, and are
			 * now trying to lower it, use the higher rate. */
			if (start_hi != RATE_INVALID) {
				new_rate = start_hi;
				break;
			}

			new_rate = rate;

			/* Loop again with lower rate */
			if (low != RATE_INVALID)
				rate = low;

			/* Lower rate not available, use the original */
			else
				break;

			/* Else try to raise the "search" rate to match "active" */
		} else {
			/* (2nd or later pass)
			 * If we've already tried to lower the rate, and are
			 * now trying to raise it, use the lower rate. */
			if (new_rate != RATE_INVALID)
				break;

			/* Loop again with higher rate */
			else if (high != RATE_INVALID) {
				start_hi = high;
				rate = high;

				/* Higher rate not available, use the original */
			} else {
				new_rate = rate;
				break;
			}
		}
	}

	/* May still be RATE_INVALID if the walk never found a candidate;
	 * callers check for that before using the result. */
	return new_rate;
}
1174
1175/*
1176 * Set up search table for MIMO2
1177 */
1178static int
1179il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
1180 struct ieee80211_conf *conf,
1181 struct ieee80211_sta *sta,
1182 struct il_scale_tbl_info *tbl, int idx)
1183{
1184 u16 rate_mask;
1185 s32 rate;
1186 s8 is_green = lq_sta->is_green;
1187 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
1188 struct il_rxon_context *ctx = sta_priv->common.ctx;
1189
1190 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1191 return -1;
1192
1193 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
1194 WLAN_HT_CAP_SM_PS_STATIC)
1195 return -1;
1196
1197 /* Need both Tx chains/antennas to support MIMO */
1198 if (il->hw_params.tx_chains_num < 2)
1199 return -1;
1200
1201 D_RATE("LQ: try to switch to MIMO2\n");
1202
1203 tbl->lq_type = LQ_MIMO2;
1204 tbl->is_dup = lq_sta->is_dup;
1205 tbl->action = 0;
1206 tbl->max_search = IL_MAX_SEARCH;
1207 rate_mask = lq_sta->active_mimo2_rate;
1208
1209 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
1210 tbl->is_ht40 = 1;
1211 else
1212 tbl->is_ht40 = 0;
1213
1214 il4965_rs_set_expected_tpt_table(lq_sta, tbl);
1215
1216 rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
1217
1218 D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
1219 if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
1220 D_RATE("Can't switch with idx %d rate mask %x\n", rate,
1221 rate_mask);
1222 return -1;
1223 }
1224 tbl->current_rate =
1225 il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
1226
1227 D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
1228 is_green);
1229 return 0;
1230}
1231
1232/*
1233 * Set up search table for SISO
1234 */
1235static int
1236il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
1237 struct ieee80211_conf *conf, struct ieee80211_sta *sta,
1238 struct il_scale_tbl_info *tbl, int idx)
1239{
1240 u16 rate_mask;
1241 u8 is_green = lq_sta->is_green;
1242 s32 rate;
1243 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
1244 struct il_rxon_context *ctx = sta_priv->common.ctx;
1245
1246 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1247 return -1;
1248
1249 D_RATE("LQ: try to switch to SISO\n");
1250
1251 tbl->is_dup = lq_sta->is_dup;
1252 tbl->lq_type = LQ_SISO;
1253 tbl->action = 0;
1254 tbl->max_search = IL_MAX_SEARCH;
1255 rate_mask = lq_sta->active_siso_rate;
1256
1257 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
1258 tbl->is_ht40 = 1;
1259 else
1260 tbl->is_ht40 = 0;
1261
1262 if (is_green)
1263 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield */
1264
1265 il4965_rs_set_expected_tpt_table(lq_sta, tbl);
1266 rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
1267
1268 D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
1269 if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
1270 D_RATE("can not switch with idx %d rate mask %x\n", rate,
1271 rate_mask);
1272 return -1;
1273 }
1274 tbl->current_rate =
1275 il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
1276 D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
1277 is_green);
1278 return 0;
1279}
1280
1281/*
1282 * Try to switch to new modulation mode from legacy
1283 */
static int
il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta,
			    struct ieee80211_conf *conf,
			    struct ieee80211_sta *sta, int idx)
{
	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	/* The other of the two lq_info slots becomes the "search" table */
	struct il_scale_tbl_info *search_tbl =
	    &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct il_rate_scale_data *win = &(tbl->win[idx]);
	/* Copy size excludes the per-rate history windows at the tail of
	 * struct il_scale_tbl_info, so search history is not inherited */
	u32 sz =
	    (sizeof(struct il_scale_tbl_info) -
	     (sizeof(struct il_rate_scale_data) * RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = il->hw_params.valid_tx_ant;
	u8 tx_chains_num = il->hw_params.tx_chains_num;
	int ret = 0;
	u8 update_search_tbl_counter = 0;

	/* Start the action cycle at the SISO attempt */
	tbl->action = IL_LEGACY_SWITCH_SISO;

	/* Remember where we started so one full wrap ends the loop */
	start_action = tbl->action;
	for (;;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IL_LEGACY_SWITCH_ANTENNA1:
		case IL_LEGACY_SWITCH_ANTENNA2:
			D_RATE("LQ: Legacy toggle Antenna\n");

			/* Skip if hardware lacks the chain this action needs */
			if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
			     tx_chains_num <= 1) ||
			    (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
			     tx_chains_num <= 2))
				break;

			/* Don't change antenna if success has been great */
			if (win->success_ratio >= IL_RS_GOOD_RATIO)
				break;

			/* Set up search table to try other antenna */
			memcpy(search_tbl, tbl, sz);

			if (il4965_rs_toggle_antenna
			    (valid_tx_ant, &search_tbl->current_rate,
			     search_tbl)) {
				update_search_tbl_counter = 1;
				il4965_rs_set_expected_tpt_table(lq_sta,
								 search_tbl);
				goto out;
			}
			break;
		case IL_LEGACY_SWITCH_SISO:
			D_RATE("LQ: Legacy switch to SISO\n");

			/* Set up search table to try SISO */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;
			ret =
			    il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
						     search_tbl, idx);
			/* 0 means the switch was configured successfully */
			if (!ret) {
				lq_sta->action_counter = 0;
				goto out;
			}

			break;
		case IL_LEGACY_SWITCH_MIMO2_AB:
		case IL_LEGACY_SWITCH_MIMO2_AC:
		case IL_LEGACY_SWITCH_MIMO2_BC:
			D_RATE("LQ: Legacy switch to MIMO2\n");

			/* Set up search table to try MIMO */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;

			/* Antenna pair is implied by which MIMO2 action */
			if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
				search_tbl->ant_type = ANT_AB;
			else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
				search_tbl->ant_type = ANT_AC;
			else
				search_tbl->ant_type = ANT_BC;

			if (!il4965_rs_is_valid_ant
			    (valid_tx_ant, search_tbl->ant_type))
				break;

			ret =
			    il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
						      search_tbl, idx);
			if (!ret) {
				lq_sta->action_counter = 0;
				goto out;
			}
			break;
		}
		/* Advance to the next action, wrapping around the cycle */
		tbl->action++;
		if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
			tbl->action = IL_LEGACY_SWITCH_ANTENNA1;

		/* Tried every action once without success: give up */
		if (tbl->action == start_action)
			break;

	}
	/* No viable alternative found; invalidate the search table */
	search_tbl->lq_type = LQ_NONE;
	return 0;

out:
	/* A search candidate was set up; flag it and pre-advance the
	 * action so the next search round tries something different */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
		tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;
	return 0;

}
1399
1400/*
1401 * Try to switch to new modulation mode from SISO
1402 */
static int
il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
			     struct ieee80211_conf *conf,
			     struct ieee80211_sta *sta, int idx)
{
	u8 is_green = lq_sta->is_green;
	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	/* The other of the two lq_info slots becomes the "search" table */
	struct il_scale_tbl_info *search_tbl =
	    &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct il_rate_scale_data *win = &(tbl->win[idx]);
	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
	/* Copy size excludes the per-rate history windows at the tail of
	 * struct il_scale_tbl_info, so search history is not inherited */
	u32 sz =
	    (sizeof(struct il_scale_tbl_info) -
	     (sizeof(struct il_rate_scale_data) * RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = il->hw_params.valid_tx_ant;
	u8 tx_chains_num = il->hw_params.tx_chains_num;
	u8 update_search_tbl_counter = 0;
	int ret;

	/* Resume the action cycle where the last search left off */
	start_action = tbl->action;

	for (;;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IL_SISO_SWITCH_ANTENNA1:
		case IL_SISO_SWITCH_ANTENNA2:
			D_RATE("LQ: SISO toggle Antenna\n");
			/* Skip if hardware lacks the chain this action needs */
			if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
			     tx_chains_num <= 1) ||
			    (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
			     tx_chains_num <= 2))
				break;

			/* Don't change antenna if success has been great */
			if (win->success_ratio >= IL_RS_GOOD_RATIO)
				break;

			/* Set up search table to try the other antenna */
			memcpy(search_tbl, tbl, sz);
			if (il4965_rs_toggle_antenna
			    (valid_tx_ant, &search_tbl->current_rate,
			     search_tbl)) {
				update_search_tbl_counter = 1;
				goto out;
			}
			break;
		case IL_SISO_SWITCH_MIMO2_AB:
		case IL_SISO_SWITCH_MIMO2_AC:
		case IL_SISO_SWITCH_MIMO2_BC:
			D_RATE("LQ: SISO switch to MIMO2\n");
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;

			/* Antenna pair is implied by which MIMO2 action */
			if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
				search_tbl->ant_type = ANT_AB;
			else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
				search_tbl->ant_type = ANT_AC;
			else
				search_tbl->ant_type = ANT_BC;

			if (!il4965_rs_is_valid_ant
			    (valid_tx_ant, search_tbl->ant_type))
				break;

			ret =
			    il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
						      search_tbl, idx);
			/* 0 means the switch was configured successfully */
			if (!ret)
				goto out;
			break;
		case IL_SISO_SWITCH_GI:
			/* Only try SGI if the peer advertises it for our
			 * current channel width */
			if (!tbl->is_ht40 &&
			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
				break;
			if (tbl->is_ht40 &&
			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
				break;

			D_RATE("LQ: SISO toggle SGI/NGI\n");

			memcpy(search_tbl, tbl, sz);
			if (is_green) {
				/* Greenfield SISO must not use SGI; if it
				 * somehow is set, log and fix it below */
				if (!tbl->is_SGI)
					break;
				else
					IL_ERR("SGI was set in GF+SISO\n");
			}
			search_tbl->is_SGI = !tbl->is_SGI;
			il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
			/* Don't leave SGI if current throughput already
			 * matches what non-SGI could give at this rate */
			if (tbl->is_SGI) {
				s32 tpt = lq_sta->last_tpt / 100;
				if (tpt >= search_tbl->expected_tpt[idx])
					break;
			}
			search_tbl->current_rate =
			    il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
							 is_green);
			update_search_tbl_counter = 1;
			goto out;
		}
		/* Advance to the next action, wrapping around the cycle */
		tbl->action++;
		if (tbl->action > IL_SISO_SWITCH_GI)
			tbl->action = IL_SISO_SWITCH_ANTENNA1;

		/* Tried every action once without success: give up */
		if (tbl->action == start_action)
			break;
	}
	/* No viable alternative found; invalidate the search table */
	search_tbl->lq_type = LQ_NONE;
	return 0;

out:
	/* A search candidate was set up; flag it and pre-advance the
	 * action so the next search round tries something different */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IL_SISO_SWITCH_GI)
		tbl->action = IL_SISO_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;

	return 0;
}
1522
1523/*
1524 * Try to switch to new modulation mode from MIMO2
1525 */
static int
il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
			      struct ieee80211_conf *conf,
			      struct ieee80211_sta *sta, int idx)
{
	s8 is_green = lq_sta->is_green;
	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	/* The other of the two lq_info slots becomes the "search" table */
	struct il_scale_tbl_info *search_tbl =
	    &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct il_rate_scale_data *win = &(tbl->win[idx]);
	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
	/* Copy size excludes the per-rate history windows at the tail of
	 * struct il_scale_tbl_info, so search history is not inherited */
	u32 sz =
	    (sizeof(struct il_scale_tbl_info) -
	     (sizeof(struct il_rate_scale_data) * RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = il->hw_params.valid_tx_ant;
	u8 tx_chains_num = il->hw_params.tx_chains_num;
	u8 update_search_tbl_counter = 0;
	int ret;

	/* Resume the action cycle where the last search left off */
	start_action = tbl->action;
	for (;;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IL_MIMO2_SWITCH_ANTENNA1:
		case IL_MIMO2_SWITCH_ANTENNA2:
			D_RATE("LQ: MIMO2 toggle Antennas\n");

			/* Toggling the MIMO pair needs a third chain */
			if (tx_chains_num <= 2)
				break;

			/* Don't change antennas if success has been great */
			if (win->success_ratio >= IL_RS_GOOD_RATIO)
				break;

			memcpy(search_tbl, tbl, sz);
			if (il4965_rs_toggle_antenna
			    (valid_tx_ant, &search_tbl->current_rate,
			     search_tbl)) {
				update_search_tbl_counter = 1;
				goto out;
			}
			break;
		case IL_MIMO2_SWITCH_SISO_A:
		case IL_MIMO2_SWITCH_SISO_B:
		case IL_MIMO2_SWITCH_SISO_C:
			D_RATE("LQ: MIMO2 switch to SISO\n");

			/* Set up new search table for SISO */
			memcpy(search_tbl, tbl, sz);

			/* Single antenna is implied by which SISO action */
			if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
				search_tbl->ant_type = ANT_A;
			else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
				search_tbl->ant_type = ANT_B;
			else
				search_tbl->ant_type = ANT_C;

			if (!il4965_rs_is_valid_ant
			    (valid_tx_ant, search_tbl->ant_type))
				break;

			ret =
			    il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
						     search_tbl, idx);
			/* 0 means the switch was configured successfully */
			if (!ret)
				goto out;

			break;

		case IL_MIMO2_SWITCH_GI:
			/* Only try SGI if the peer advertises it for our
			 * current channel width */
			if (!tbl->is_ht40 &&
			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
				break;
			if (tbl->is_ht40 &&
			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
				break;

			D_RATE("LQ: MIMO2 toggle SGI/NGI\n");

			/* Set up new search table for MIMO2 */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = !tbl->is_SGI;
			il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
			/*
			 * If active table already uses the fastest possible
			 * modulation (dual stream with short guard interval),
			 * and it's working well, there's no need to look
			 * for a better type of modulation!
			 */
			if (tbl->is_SGI) {
				s32 tpt = lq_sta->last_tpt / 100;
				if (tpt >= search_tbl->expected_tpt[idx])
					break;
			}
			search_tbl->current_rate =
			    il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
							 is_green);
			update_search_tbl_counter = 1;
			goto out;

		}
		/* Advance to the next action, wrapping around the cycle */
		tbl->action++;
		if (tbl->action > IL_MIMO2_SWITCH_GI)
			tbl->action = IL_MIMO2_SWITCH_ANTENNA1;

		/* Tried every action once without success: give up */
		if (tbl->action == start_action)
			break;
	}
	/* No viable alternative found; invalidate the search table */
	search_tbl->lq_type = LQ_NONE;
	return 0;
out:
	/* A search candidate was set up; flag it and pre-advance the
	 * action so the next search round tries something different */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IL_MIMO2_SWITCH_GI)
		tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;

	return 0;

}
1647
1648/*
1649 * Check whether we should continue using same modulation mode, or
1650 * begin search for a new mode, based on:
1651 * 1) # tx successes or failures while using this mode
1652 * 2) # times calling this function
1653 * 3) elapsed time in this mode (not used, for now)
1654 */
1655static void
1656il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
1657{
1658 struct il_scale_tbl_info *tbl;
1659 int i;
1660 int active_tbl;
1661 int flush_interval_passed = 0;
1662 struct il_priv *il;
1663
1664 il = lq_sta->drv;
1665 active_tbl = lq_sta->active_tbl;
1666
1667 tbl = &(lq_sta->lq_info[active_tbl]);
1668
1669 /* If we've been disallowing search, see if we should now allow it */
1670 if (lq_sta->stay_in_tbl) {
1671
1672 /* Elapsed time using current modulation mode */
1673 if (lq_sta->flush_timer)
1674 flush_interval_passed =
1675 time_after(jiffies,
1676 (unsigned long)(lq_sta->flush_timer +
1677 RATE_SCALE_FLUSH_INTVL));
1678
1679 /*
1680 * Check if we should allow search for new modulation mode.
1681 * If many frames have failed or succeeded, or we've used
1682 * this same modulation for a long time, allow search, and
1683 * reset history stats that keep track of whether we should
1684 * allow a new search. Also (below) reset all bitmaps and
1685 * stats in active history.
1686 */
1687 if (force_search ||
1688 lq_sta->total_failed > lq_sta->max_failure_limit ||
1689 lq_sta->total_success > lq_sta->max_success_limit ||
1690 (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
1691 flush_interval_passed)) {
1692 D_RATE("LQ: stay is expired %d %d %d\n:",
1693 lq_sta->total_failed, lq_sta->total_success,
1694 flush_interval_passed);
1695
1696 /* Allow search for new mode */
1697 lq_sta->stay_in_tbl = 0; /* only place reset */
1698 lq_sta->total_failed = 0;
1699 lq_sta->total_success = 0;
1700 lq_sta->flush_timer = 0;
1701
1702 /*
1703 * Else if we've used this modulation mode enough repetitions
1704 * (regardless of elapsed time or success/failure), reset
1705 * history bitmaps and rate-specific stats for all rates in
1706 * active table.
1707 */
1708 } else {
1709 lq_sta->table_count++;
1710 if (lq_sta->table_count >= lq_sta->table_count_limit) {
1711 lq_sta->table_count = 0;
1712
1713 D_RATE("LQ: stay in table clear win\n");
1714 for (i = 0; i < RATE_COUNT; i++)
1715 il4965_rs_rate_scale_clear_win(&
1716 (tbl->
1717 win
1718 [i]));
1719 }
1720 }
1721
1722 /* If transitioning to allow "search", reset all history
1723 * bitmaps and stats in active table (this will become the new
1724 * "search" table). */
1725 if (!lq_sta->stay_in_tbl) {
1726 for (i = 0; i < RATE_COUNT; i++)
1727 il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
1728 }
1729 }
1730}
1731
1732/*
1733 * setup rate table in uCode
1734 */
1735static void
1736il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
1737 struct il_lq_sta *lq_sta,
1738 struct il_scale_tbl_info *tbl, int idx, u8 is_green)
1739{
1740 u32 rate;
1741
1742 /* Update uCode's rate table. */
1743 rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
1744 il4965_rs_fill_link_cmd(il, lq_sta, rate);
1745 il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
1746}
1747
1748/*
1749 * Do rate scaling and search for new modulation mode.
1750 */
1751static void
1752il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
1753 struct ieee80211_sta *sta,
1754 struct il_lq_sta *lq_sta)
1755{
1756 struct ieee80211_hw *hw = il->hw;
1757 struct ieee80211_conf *conf = &hw->conf;
1758 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1759 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1760 int low = RATE_INVALID;
1761 int high = RATE_INVALID;
1762 int idx;
1763 int i;
1764 struct il_rate_scale_data *win = NULL;
1765 int current_tpt = IL_INVALID_VALUE;
1766 int low_tpt = IL_INVALID_VALUE;
1767 int high_tpt = IL_INVALID_VALUE;
1768 u32 fail_count;
1769 s8 scale_action = 0;
1770 u16 rate_mask;
1771 u8 update_lq = 0;
1772 struct il_scale_tbl_info *tbl, *tbl1;
1773 u16 rate_scale_idx_msk = 0;
1774 u8 is_green = 0;
1775 u8 active_tbl = 0;
1776 u8 done_search = 0;
1777 u16 high_low;
1778 s32 sr;
1779 u8 tid = MAX_TID_COUNT;
1780 struct il_tid_data *tid_data;
1781 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
1782 struct il_rxon_context *ctx = sta_priv->common.ctx;
1783
1784 D_RATE("rate scale calculate new rate for skb\n");
1785
1786 /* Send management frames and NO_ACK data using lowest rate. */
1787 /* TODO: this could probably be improved.. */
1788 if (!ieee80211_is_data(hdr->frame_control) ||
1789 (info->flags & IEEE80211_TX_CTL_NO_ACK))
1790 return;
1791
1792 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
1793
1794 tid = il4965_rs_tl_add_packet(lq_sta, hdr);
1795 if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1796 tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid];
1797 if (tid_data->agg.state == IL_AGG_OFF)
1798 lq_sta->is_agg = 0;
1799 else
1800 lq_sta->is_agg = 1;
1801 } else
1802 lq_sta->is_agg = 0;
1803
1804 /*
1805 * Select rate-scale / modulation-mode table to work with in
1806 * the rest of this function: "search" if searching for better
1807 * modulation mode, or "active" if doing rate scaling within a mode.
1808 */
1809 if (!lq_sta->search_better_tbl)
1810 active_tbl = lq_sta->active_tbl;
1811 else
1812 active_tbl = 1 - lq_sta->active_tbl;
1813
1814 tbl = &(lq_sta->lq_info[active_tbl]);
1815 if (is_legacy(tbl->lq_type))
1816 lq_sta->is_green = 0;
1817 else
1818 lq_sta->is_green = il4965_rs_use_green(sta);
1819 is_green = lq_sta->is_green;
1820
1821 /* current tx rate */
1822 idx = lq_sta->last_txrate_idx;
1823
1824 D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);
1825
1826 /* rates available for this association, and for modulation mode */
1827 rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1828
1829 D_RATE("mask 0x%04X\n", rate_mask);
1830
1831 /* mask with station rate restriction */
1832 if (is_legacy(tbl->lq_type)) {
1833 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1834 /* supp_rates has no CCK bits in A mode */
1835 rate_scale_idx_msk =
1836 (u16) (rate_mask &
1837 (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
1838 else
1839 rate_scale_idx_msk =
1840 (u16) (rate_mask & lq_sta->supp_rates);
1841
1842 } else
1843 rate_scale_idx_msk = rate_mask;
1844
1845 if (!rate_scale_idx_msk)
1846 rate_scale_idx_msk = rate_mask;
1847
1848 if (!((1 << idx) & rate_scale_idx_msk)) {
1849 IL_ERR("Current Rate is not valid\n");
1850 if (lq_sta->search_better_tbl) {
1851 /* revert to active table if search table is not valid */
1852 tbl->lq_type = LQ_NONE;
1853 lq_sta->search_better_tbl = 0;
1854 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1855 /* get "active" rate info */
1856 idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
1857 il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
1858 is_green);
1859 }
1860 return;
1861 }
1862
1863 /* Get expected throughput table and history win for current rate */
1864 if (!tbl->expected_tpt) {
1865 IL_ERR("tbl->expected_tpt is NULL\n");
1866 return;
1867 }
1868
1869 /* force user max rate if set by user */
1870 if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) {
1871 idx = lq_sta->max_rate_idx;
1872 update_lq = 1;
1873 win = &(tbl->win[idx]);
1874 goto lq_update;
1875 }
1876
1877 win = &(tbl->win[idx]);
1878
1879 /*
1880 * If there is not enough history to calculate actual average
1881 * throughput, keep analyzing results of more tx frames, without
1882 * changing rate or mode (bypass most of the rest of this function).
1883 * Set up new rate table in uCode only if old rate is not supported
1884 * in current association (use new rate found above).
1885 */
1886 fail_count = win->counter - win->success_counter;
1887 if (fail_count < RATE_MIN_FAILURE_TH &&
1888 win->success_counter < RATE_MIN_SUCCESS_TH) {
1889 D_RATE("LQ: still below TH. succ=%d total=%d " "for idx %d\n",
1890 win->success_counter, win->counter, idx);
1891
1892 /* Can't calculate this yet; not enough history */
1893 win->average_tpt = IL_INVALID_VALUE;
1894
1895 /* Should we stay with this modulation mode,
1896 * or search for a new one? */
1897 il4965_rs_stay_in_table(lq_sta, false);
1898
1899 goto out;
1900 }
1901 /* Else we have enough samples; calculate estimate of
1902 * actual average throughput */
1903 if (win->average_tpt !=
1904 ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
1905 IL_ERR("expected_tpt should have been calculated by now\n");
1906 win->average_tpt =
1907 ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
1908 }
1909
1910 /* If we are searching for better modulation mode, check success. */
1911 if (lq_sta->search_better_tbl) {
1912 /* If good success, continue using the "search" mode;
1913 * no need to send new link quality command, since we're
1914 * continuing to use the setup that we've been trying. */
1915 if (win->average_tpt > lq_sta->last_tpt) {
1916
1917 D_RATE("LQ: SWITCHING TO NEW TBL "
1918 "suc=%d cur-tpt=%d old-tpt=%d\n",
1919 win->success_ratio, win->average_tpt,
1920 lq_sta->last_tpt);
1921
1922 if (!is_legacy(tbl->lq_type))
1923 lq_sta->enable_counter = 1;
1924
1925 /* Swap tables; "search" becomes "active" */
1926 lq_sta->active_tbl = active_tbl;
1927 current_tpt = win->average_tpt;
1928
1929 /* Else poor success; go back to mode in "active" table */
1930 } else {
1931
1932 D_RATE("LQ: GOING BACK TO THE OLD TBL "
1933 "suc=%d cur-tpt=%d old-tpt=%d\n",
1934 win->success_ratio, win->average_tpt,
1935 lq_sta->last_tpt);
1936
1937 /* Nullify "search" table */
1938 tbl->lq_type = LQ_NONE;
1939
1940 /* Revert to "active" table */
1941 active_tbl = lq_sta->active_tbl;
1942 tbl = &(lq_sta->lq_info[active_tbl]);
1943
1944 /* Revert to "active" rate and throughput info */
1945 idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
1946 current_tpt = lq_sta->last_tpt;
1947
1948 /* Need to set up a new rate table in uCode */
1949 update_lq = 1;
1950 }
1951
1952 /* Either way, we've made a decision; modulation mode
1953 * search is done, allow rate adjustment next time. */
1954 lq_sta->search_better_tbl = 0;
1955 done_search = 1; /* Don't switch modes below! */
1956 goto lq_update;
1957 }
1958
1959 /* (Else) not in search of better modulation mode, try for better
1960 * starting rate, while staying in this mode. */
1961 high_low =
1962 il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk,
1963 tbl->lq_type);
1964 low = high_low & 0xff;
1965 high = (high_low >> 8) & 0xff;
1966
1967 /* If user set max rate, dont allow higher than user constrain */
1968 if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high)
1969 high = RATE_INVALID;
1970
1971 sr = win->success_ratio;
1972
1973 /* Collect measured throughputs for current and adjacent rates */
1974 current_tpt = win->average_tpt;
1975 if (low != RATE_INVALID)
1976 low_tpt = tbl->win[low].average_tpt;
1977 if (high != RATE_INVALID)
1978 high_tpt = tbl->win[high].average_tpt;
1979
1980 scale_action = 0;
1981
1982 /* Too many failures, decrease rate */
1983 if (sr <= RATE_DECREASE_TH || current_tpt == 0) {
1984 D_RATE("decrease rate because of low success_ratio\n");
1985 scale_action = -1;
1986
1987 /* No throughput measured yet for adjacent rates; try increase. */
1988 } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
1989
1990 if (high != RATE_INVALID && sr >= RATE_INCREASE_TH)
1991 scale_action = 1;
1992 else if (low != RATE_INVALID)
1993 scale_action = 0;
1994 }
1995
1996 /* Both adjacent throughputs are measured, but neither one has better
1997 * throughput; we're using the best rate, don't change it! */
1998 else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE &&
1999 low_tpt < current_tpt && high_tpt < current_tpt)
2000 scale_action = 0;
2001
2002 /* At least one adjacent rate's throughput is measured,
2003 * and may have better performance. */
2004 else {
2005 /* Higher adjacent rate's throughput is measured */
2006 if (high_tpt != IL_INVALID_VALUE) {
2007 /* Higher rate has better throughput */
2008 if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH)
2009 scale_action = 1;
2010 else
2011 scale_action = 0;
2012
2013 /* Lower adjacent rate's throughput is measured */
2014 } else if (low_tpt != IL_INVALID_VALUE) {
2015 /* Lower rate has better throughput */
2016 if (low_tpt > current_tpt) {
2017 D_RATE("decrease rate because of low tpt\n");
2018 scale_action = -1;
2019 } else if (sr >= RATE_INCREASE_TH) {
2020 scale_action = 1;
2021 }
2022 }
2023 }
2024
2025 /* Sanity check; asked for decrease, but success rate or throughput
2026 * has been good at old rate. Don't change it. */
2027 if (scale_action == -1 && low != RATE_INVALID &&
2028 (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
2029 scale_action = 0;
2030
2031 switch (scale_action) {
2032 case -1:
2033 /* Decrease starting rate, update uCode's rate table */
2034 if (low != RATE_INVALID) {
2035 update_lq = 1;
2036 idx = low;
2037 }
2038
2039 break;
2040 case 1:
2041 /* Increase starting rate, update uCode's rate table */
2042 if (high != RATE_INVALID) {
2043 update_lq = 1;
2044 idx = high;
2045 }
2046
2047 break;
2048 case 0:
2049 /* No change */
2050 default:
2051 break;
2052 }
2053
2054 D_RATE("choose rate scale idx %d action %d low %d " "high %d type %d\n",
2055 idx, scale_action, low, high, tbl->lq_type);
2056
2057lq_update:
2058 /* Replace uCode's rate table for the destination station. */
2059 if (update_lq)
2060 il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
2061 is_green);
2062
2063 /* Should we stay with this modulation mode,
2064 * or search for a new one? */
2065 il4965_rs_stay_in_table(lq_sta, false);
2066
2067 /*
2068 * Search for new modulation mode if we're:
2069 * 1) Not changing rates right now
2070 * 2) Not just finishing up a search
2071 * 3) Allowing a new search
2072 */
2073 if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) {
2074 /* Save current throughput to compare with "search" throughput */
2075 lq_sta->last_tpt = current_tpt;
2076
2077 /* Select a new "search" modulation mode to try.
2078 * If one is found, set up the new "search" table. */
2079 if (is_legacy(tbl->lq_type))
2080 il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx);
2081 else if (is_siso(tbl->lq_type))
2082 il4965_rs_move_siso_to_other(il, lq_sta, conf, sta,
2083 idx);
2084 else /* (is_mimo2(tbl->lq_type)) */
2085 il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta,
2086 idx);
2087
2088 /* If new "search" mode was selected, set up in uCode table */
2089 if (lq_sta->search_better_tbl) {
2090 /* Access the "search" table, clear its history. */
2091 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2092 for (i = 0; i < RATE_COUNT; i++)
2093 il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
2094
2095 /* Use new "search" start rate */
2096 idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
2097
2098 D_RATE("Switch current mcs: %X idx: %d\n",
2099 tbl->current_rate, idx);
2100 il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
2101 il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
2102 } else
2103 done_search = 1;
2104 }
2105
2106 if (done_search && !lq_sta->stay_in_tbl) {
2107 /* If the "active" (non-search) mode was legacy,
2108 * and we've tried switching antennas,
2109 * but we haven't been able to try HT modes (not available),
2110 * stay with best antenna legacy modulation for a while
2111 * before next round of mode comparisons. */
2112 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2113 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2114 lq_sta->action_counter > tbl1->max_search) {
2115 D_RATE("LQ: STAY in legacy table\n");
2116 il4965_rs_set_stay_in_table(il, 1, lq_sta);
2117 }
2118
2119 /* If we're in an HT mode, and all 3 mode switch actions
2120 * have been tried and compared, stay in this best modulation
2121 * mode for a while before next round of mode comparisons. */
2122 if (lq_sta->enable_counter &&
2123 lq_sta->action_counter >= tbl1->max_search) {
2124 if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD &&
2125 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2126 tid != MAX_TID_COUNT) {
2127 tid_data =
2128 &il->stations[lq_sta->lq.sta_id].tid[tid];
2129 if (tid_data->agg.state == IL_AGG_OFF) {
2130 D_RATE("try to aggregate tid %d\n",
2131 tid);
2132 il4965_rs_tl_turn_on_agg(il, tid,
2133 lq_sta, sta);
2134 }
2135 }
2136 il4965_rs_set_stay_in_table(il, 0, lq_sta);
2137 }
2138 }
2139
2140out:
2141 tbl->current_rate =
2142 il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
2143 i = idx;
2144 lq_sta->last_txrate_idx = i;
2145}
2146
2147/**
2148 * il4965_rs_initialize_lq - Initialize a station's hardware rate table
2149 *
2150 * The uCode's station table contains a table of fallback rates
2151 * for automatic fallback during transmission.
2152 *
2153 * NOTE: This sets up a default set of values. These will be replaced later
2154 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2155 * rc80211_simple.
2156 *
2157 * NOTE: Run C_ADD_STA command to set up station table entry, before
2158 * calling this function (which runs C_TX_LINK_QUALITY_CMD,
2159 * which requires station table entry to exist).
2160 */
static void
il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
			struct ieee80211_sta *sta, struct il_lq_sta *lq_sta)
{
	struct il_scale_tbl_info *tbl;
	int rate_idx;
	int i;
	u32 rate;
	u8 use_green = il4965_rs_use_green(sta);
	u8 active_tbl = 0;
	u8 valid_tx_ant;
	struct il_station_priv *sta_priv;
	struct il_rxon_context *ctx;

	if (!sta || !lq_sta)
		return;

	sta_priv = (void *)sta->drv_priv;
	ctx = sta_priv->common.ctx;

	/* Start from whatever rate idx the station last used */
	i = lq_sta->last_txrate_idx;

	valid_tx_ant = il->hw_params.valid_tx_ant;

	/* Pick the "active" table, or the "search" table if a mode
	 * search is in progress (search table is the other of the two) */
	if (!lq_sta->search_better_tbl)
		active_tbl = lq_sta->active_tbl;
	else
		active_tbl = 1 - lq_sta->active_tbl;

	tbl = &(lq_sta->lq_info[active_tbl]);

	/* Clamp out-of-range starting idx to the lowest rate */
	if (i < 0 || i >= RATE_COUNT)
		i = 0;

	/* Build rate_n_flags: PLCP value plus first valid antenna */
	rate = il_rates[i].plcp;
	tbl->ant_type = il4965_first_antenna(valid_tx_ant);
	rate |= tbl->ant_type << RATE_MCS_ANT_POS;

	if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE)
		rate |= RATE_MCS_CCK_MSK;

	il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
	/* If the chosen antenna is not actually valid, switch to one that is */
	if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
		il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);

	rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
	tbl->current_rate = rate;
	il4965_rs_set_expected_tpt_table(lq_sta, tbl);
	/* NULL il: skip antenna toggling inside fill_link_cmd */
	il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
	il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
	/* Push the freshly built link-quality table to the uCode (sync) */
	il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true);
}
2213
/*
 * mac80211 rate-control get_rate hook: translate the driver's cached
 * last_rate_n_flags into the mac80211 tx rate-control info for this skb.
 */
static void
il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
		   struct ieee80211_tx_rate_control *txrc)
{

	struct sk_buff *skb = txrc->skb;
	struct ieee80211_supported_band *sband = txrc->sband;
	struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct il_lq_sta *lq_sta = il_sta;
	int rate_idx;

	D_RATE("rate scale calculate new rate for skb\n");

	/* Get max rate if user set max rate */
	if (lq_sta) {
		lq_sta->max_rate_idx = txrc->max_rate_idx;
		/* On 5 GHz the mac80211 idx is OFDM-relative; rebase it */
		if (sband->band == IEEE80211_BAND_5GHZ &&
		    lq_sta->max_rate_idx != -1)
			lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
		if (lq_sta->max_rate_idx < 0 ||
		    lq_sta->max_rate_idx >= RATE_COUNT)
			lq_sta->max_rate_idx = -1;
	}

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (lq_sta && !lq_sta->drv) {
		D_RATE("Rate scaling not initialized yet.\n");
		il_sta = NULL;
	}

	/* Send management frames and NO_ACK data using lowest rate. */
	if (rate_control_send_low(sta, il_sta, txrc))
		return;

	if (!lq_sta)
		return;

	rate_idx = lq_sta->last_txrate_idx;

	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
		/* Convert driver rate idx to mac80211 MCS idx */
		rate_idx -= IL_FIRST_OFDM_RATE;
		/* 6M and 9M shared same MCS idx */
		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
		if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
		    RATE_MIMO2_6M_PLCP)
			rate_idx = rate_idx + MCS_IDX_PER_STREAM;
		/* Mirror the HT modulation flags into mac80211 form */
		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
			info->control.rates[0].flags |=
			    IEEE80211_TX_RC_SHORT_GI;
		if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
			info->control.rates[0].flags |=
			    IEEE80211_TX_RC_DUP_DATA;
		if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
			info->control.rates[0].flags |=
			    IEEE80211_TX_RC_40_MHZ_WIDTH;
		if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
			info->control.rates[0].flags |=
			    IEEE80211_TX_RC_GREEN_FIELD;
	} else {
		/* Check for invalid rates */
		if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
		    (sband->band == IEEE80211_BAND_5GHZ &&
		     rate_idx < IL_FIRST_OFDM_RATE))
			rate_idx = rate_lowest_index(sband, sta);
		/* On valid 5 GHz rate, adjust idx */
		else if (sband->band == IEEE80211_BAND_5GHZ)
			rate_idx -= IL_FIRST_OFDM_RATE;
		info->control.rates[0].flags = 0;
	}
	info->control.rates[0].idx = rate_idx;

}
2288
2289static void *
2290il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
2291{
2292 struct il_station_priv *sta_priv =
2293 (struct il_station_priv *)sta->drv_priv;
2294 struct il_priv *il;
2295
2296 il = (struct il_priv *)il_rate;
2297 D_RATE("create station rate scale win\n");
2298
2299 return &sta_priv->lq_sta;
2300}
2301
2302/*
2303 * Called after adding a new station to initialize rate scaling
2304 */
2305void
2306il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
2307{
2308 int i, j;
2309 struct ieee80211_hw *hw = il->hw;
2310 struct ieee80211_conf *conf = &il->hw->conf;
2311 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2312 struct il_station_priv *sta_priv;
2313 struct il_lq_sta *lq_sta;
2314 struct ieee80211_supported_band *sband;
2315
2316 sta_priv = (struct il_station_priv *)sta->drv_priv;
2317 lq_sta = &sta_priv->lq_sta;
2318 sband = hw->wiphy->bands[conf->channel->band];
2319
2320 lq_sta->lq.sta_id = sta_id;
2321
2322 for (j = 0; j < LQ_SIZE; j++)
2323 for (i = 0; i < RATE_COUNT; i++)
2324 il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
2325 win[i]);
2326
2327 lq_sta->flush_timer = 0;
2328 lq_sta->supp_rates = sta->supp_rates[sband->band];
2329 for (j = 0; j < LQ_SIZE; j++)
2330 for (i = 0; i < RATE_COUNT; i++)
2331 il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
2332 win[i]);
2333
2334 D_RATE("LQ:" "*** rate scale station global init for station %d ***\n",
2335 sta_id);
2336 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2337 * the lowest or the highest rate.. Could consider using RSSI from
2338 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2339 * after assoc.. */
2340
2341 lq_sta->is_dup = 0;
2342 lq_sta->max_rate_idx = -1;
2343 lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
2344 lq_sta->is_green = il4965_rs_use_green(sta);
2345 lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
2346 lq_sta->band = il->band;
2347 /*
2348 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2349 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2350 */
2351 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2352 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2353 lq_sta->active_siso_rate &= ~((u16) 0x2);
2354 lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE;
2355
2356 /* Same here */
2357 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2358 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2359 lq_sta->active_mimo2_rate &= ~((u16) 0x2);
2360 lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE;
2361
2362 /* These values will be overridden later */
2363 lq_sta->lq.general_params.single_stream_ant_msk =
2364 il4965_first_antenna(il->hw_params.valid_tx_ant);
2365 lq_sta->lq.general_params.dual_stream_ant_msk =
2366 il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
2367 valid_tx_ant);
2368 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2369 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2370 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
2371 lq_sta->lq.general_params.dual_stream_ant_msk =
2372 il->hw_params.valid_tx_ant;
2373 }
2374
2375 /* as default allow aggregation for all tids */
2376 lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID;
2377 lq_sta->drv = il;
2378
2379 /* Set last_txrate_idx to lowest rate */
2380 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2381 if (sband->band == IEEE80211_BAND_5GHZ)
2382 lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
2383 lq_sta->is_agg = 0;
2384
2385#ifdef CONFIG_MAC80211_DEBUGFS
2386 lq_sta->dbg_fixed_rate = 0;
2387#endif
2388
2389 il4965_rs_initialize_lq(il, conf, sta, lq_sta);
2390}
2391
/*
 * Build the uCode link-quality command's retry-rate table, starting at
 * new_rate and walking down through progressively lower/safer rates.
 * il may be NULL (e.g. from il4965_rs_initialize_lq), in which case
 * antenna toggling is skipped.
 */
static void
il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
			u32 new_rate)
{
	struct il_scale_tbl_info tbl_type;
	int idx = 0;
	int rate_idx;
	int repeat_rate = 0;
	u8 ant_toggle_cnt = 0;
	u8 use_ht_possible = 1;
	u8 valid_tx_ant = 0;
	struct il_link_quality_cmd *lq_cmd = &lq_sta->lq;

	/* Override starting rate (idx 0) if needed for debug purposes */
	il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);

	/* Interpret new_rate (rate_n_flags) */
	il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
					&rate_idx);

	/* How many times should we repeat the initial rate? */
	if (is_legacy(tbl_type.lq_type)) {
		ant_toggle_cnt = 1;
		repeat_rate = IL_NUMBER_TRY;
	} else {
		repeat_rate = IL_HT_NUMBER_TRY;
	}

	lq_cmd->general_params.mimo_delimiter =
	    is_mimo(tbl_type.lq_type) ? 1 : 0;

	/* Fill 1st table entry (idx 0) */
	lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);

	if (il4965_num_of_ant(tbl_type.ant_type) == 1) {
		lq_cmd->general_params.single_stream_ant_msk =
		    tbl_type.ant_type;
	} else if (il4965_num_of_ant(tbl_type.ant_type) == 2) {
		lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type;
	}
	/* otherwise we don't modify the existing value */
	idx++;
	repeat_rate--;
	if (il)
		valid_tx_ant = il->hw_params.valid_tx_ant;

	/* Fill rest of rate table */
	while (idx < LINK_QUAL_MAX_RETRY_NUM) {
		/* Repeat initial/next rate.
		 * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
		 * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
		while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
			if (is_legacy(tbl_type.lq_type)) {
				/* Toggle antenna after enough tries on one */
				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
					ant_toggle_cnt++;
				else if (il &&
					 il4965_rs_toggle_antenna(valid_tx_ant,
								  &new_rate,
								  &tbl_type))
					ant_toggle_cnt = 1;
			}

			/* Override next rate if needed for debug purposes */
			il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);

			/* Fill next table entry */
			lq_cmd->rs_table[idx].rate_n_flags =
			    cpu_to_le32(new_rate);
			repeat_rate--;
			idx++;
		}

		il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
						&tbl_type, &rate_idx);

		/* Indicate to uCode which entries might be MIMO.
		 * If initial rate was MIMO, this will finally end up
		 * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
		if (is_mimo(tbl_type.lq_type))
			lq_cmd->general_params.mimo_delimiter = idx;

		/* Get next rate */
		new_rate =
		    il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
					     use_ht_possible);

		/* How many times should we repeat the next rate? */
		if (is_legacy(tbl_type.lq_type)) {
			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
				ant_toggle_cnt++;
			else if (il &&
				 il4965_rs_toggle_antenna(valid_tx_ant,
							  &new_rate, &tbl_type))
				ant_toggle_cnt = 1;

			repeat_rate = IL_NUMBER_TRY;
		} else {
			repeat_rate = IL_HT_NUMBER_TRY;
		}

		/* Don't allow HT rates after next pass.
		 * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
		use_ht_possible = 0;

		/* Override next rate if needed for debug purposes */
		il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);

		/* Fill next table entry */
		lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);

		idx++;
		repeat_rate--;
	}

	/* Aggregation parameters: limits for frame count, start threshold,
	 * and time, all taken from the driver defaults */
	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;

	lq_cmd->agg_params.agg_time_limit =
	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
}
2512
/* mac80211 rate-control alloc hook: the algorithm's private data is
 * simply the driver's own il_priv, already allocated elsewhere. */
static void *
il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	return hw->priv;
}
2518
/* rate scale requires free function to be implemented; nothing to do
 * here since il4965_rs_alloc() did not allocate anything */
static void
il4965_rs_free(void *il_rate)
{
}
2525
/* mac80211 free_sta hook: per-station state lives in sta->drv_priv and
 * is freed with the station, so only debug tracing happens here. */
static void
il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
{
	struct il_priv *il __maybe_unused = il_r;

	D_RATE("enter\n");
	D_RATE("leave\n");
}
2534
2535#ifdef CONFIG_MAC80211_DEBUGFS
/* Generic debugfs open: expose the entry's private pointer (the
 * il_lq_sta set at file creation) to the read/write handlers. */
static int
il4965_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
2542
2543static void
2544il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
2545{
2546 struct il_priv *il;
2547 u8 valid_tx_ant;
2548 u8 ant_sel_tx;
2549
2550 il = lq_sta->drv;
2551 valid_tx_ant = il->hw_params.valid_tx_ant;
2552 if (lq_sta->dbg_fixed_rate) {
2553 ant_sel_tx =
2554 ((lq_sta->
2555 dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
2556 RATE_MCS_ANT_POS);
2557 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2558 *rate_n_flags = lq_sta->dbg_fixed_rate;
2559 D_RATE("Fixed rate ON\n");
2560 } else {
2561 lq_sta->dbg_fixed_rate = 0;
2562 IL_ERR
2563 ("Invalid antenna selection 0x%X, Valid is 0x%X\n",
2564 ant_sel_tx, valid_tx_ant);
2565 D_RATE("Fixed rate OFF\n");
2566 }
2567 } else {
2568 D_RATE("Fixed rate OFF\n");
2569 }
2570}
2571
/* debugfs write handler for "rate_scale_table": parse a hex rate_n_flags
 * value to force a fixed rate (0 or unparsable input clears the fix).
 * NOTE(review): this also unconditionally widens the active rate masks,
 * even when the override is being cleared. */
static ssize_t
il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct il_lq_sta *lq_sta = file->private_data;
	struct il_priv *il;
	char buf[64];
	size_t buf_size;
	u32 parsed_rate;
	struct il_station_priv *sta_priv =
	    container_of(lq_sta, struct il_station_priv, lq_sta);
	struct il_rxon_context *ctx = sta_priv->common.ctx;

	il = lq_sta->drv;
	memset(buf, 0, sizeof(buf));
	/* leave room for the terminating NUL */
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	if (sscanf(buf, "%x", &parsed_rate) == 1)
		lq_sta->dbg_fixed_rate = parsed_rate;
	else
		lq_sta->dbg_fixed_rate = 0;

	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
	lq_sta->active_siso_rate = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
	lq_sta->active_mimo2_rate = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */

	D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id,
	       lq_sta->dbg_fixed_rate);

	/* Push the fixed rate to the uCode immediately */
	if (lq_sta->dbg_fixed_rate) {
		il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
		il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false);
	}

	return count;
}
2611
/* debugfs read handler for "rate_scale_table": dump the active scale
 * table, link-quality command parameters, and the full retry-rate table
 * into a 1 KiB text buffer. */
static ssize_t
il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;	/* running length of formatted output */
	int i = 0;
	int idx = 0;
	ssize_t ret;

	struct il_lq_sta *lq_sta = file->private_data;
	struct il_priv *il;
	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);

	il = lq_sta->drv;
	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id);
	desc +=
	    sprintf(buff + desc, "failed=%d success=%d rate=0%X\n",
		    lq_sta->total_failed, lq_sta->total_success,
		    lq_sta->active_legacy_rate);
	desc +=
	    sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate);
	desc +=
	    sprintf(buff + desc, "valid_tx_ant %s%s%s\n",
		    (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
		    (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
		    (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
	desc +=
	    sprintf(buff + desc, "lq type %s\n",
		    (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
	if (is_Ht(tbl->lq_type)) {
		/* Extra detail only meaningful for HT modes */
		desc +=
		    sprintf(buff + desc, " %s",
			    (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
		desc +=
		    sprintf(buff + desc, " %s",
			    (tbl->is_ht40) ? "40MHz" : "20MHz");
		desc +=
		    sprintf(buff + desc, " %s %s %s\n",
			    (tbl->is_SGI) ? "SGI" : "",
			    (lq_sta->is_green) ? "GF enabled" : "",
			    (lq_sta->is_agg) ? "AGG on" : "");
	}
	desc +=
	    sprintf(buff + desc, "last tx rate=0x%X\n",
		    lq_sta->last_rate_n_flags);
	desc +=
	    sprintf(buff + desc,
		    "general:" "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
		    lq_sta->lq.general_params.flags,
		    lq_sta->lq.general_params.mimo_delimiter,
		    lq_sta->lq.general_params.single_stream_ant_msk,
		    lq_sta->lq.general_params.dual_stream_ant_msk);

	desc +=
	    sprintf(buff + desc,
		    "agg:"
		    "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
		    le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
		    lq_sta->lq.agg_params.agg_dis_start_th,
		    lq_sta->lq.agg_params.agg_frame_cnt_limit);

	desc +=
	    sprintf(buff + desc,
		    "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
		    lq_sta->lq.general_params.start_rate_idx[0],
		    lq_sta->lq.general_params.start_rate_idx[1],
		    lq_sta->lq.general_params.start_rate_idx[2],
		    lq_sta->lq.general_params.start_rate_idx[3]);

	/* One line per retry-table entry; HT entries also get MCS name */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		idx =
		    il4965_hwrate_to_plcp_idx(le32_to_cpu
					      (lq_sta->lq.rs_table[i].
					       rate_n_flags));
		if (is_legacy(tbl->lq_type)) {
			desc +=
			    sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i,
				    le32_to_cpu(lq_sta->lq.rs_table[i].
						rate_n_flags),
				    il_rate_mcs[idx].mbps);
		} else {
			desc +=
			    sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n",
				    i,
				    le32_to_cpu(lq_sta->lq.rs_table[i].
						rate_n_flags),
				    il_rate_mcs[idx].mbps,
				    il_rate_mcs[idx].mcs);
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2712
/* debugfs "rate_scale_table": readable dump, writable fixed-rate (hex) */
static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
	.write = il4965_rs_sta_dbgfs_scale_table_write,
	.read = il4965_rs_sta_dbgfs_scale_table_read,
	.open = il4965_open_file_generic,
	.llseek = default_llseek,
};
2719
/* debugfs read handler for "rate_stats_table": dump, for each of the two
 * lq tables, its mode flags plus the success-history window of every rate. */
static ssize_t
il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;	/* running length of formatted output */
	int i, j;
	ssize_t ret;

	struct il_lq_sta *lq_sta = file->private_data;

	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	for (i = 0; i < LQ_SIZE; i++) {
		/* "*" marks the currently active table, "x" the other */
		desc +=
		    sprintf(buff + desc,
			    "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
			    "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x",
			    lq_sta->lq_info[i].lq_type,
			    lq_sta->lq_info[i].is_SGI,
			    lq_sta->lq_info[i].is_ht40,
			    lq_sta->lq_info[i].is_dup, lq_sta->is_green,
			    lq_sta->lq_info[i].current_rate);
		for (j = 0; j < RATE_COUNT; j++) {
			desc +=
			    sprintf(buff + desc,
				    "counter=%d success=%d %%=%d\n",
				    lq_sta->lq_info[i].win[j].counter,
				    lq_sta->lq_info[i].win[j].success_counter,
				    lq_sta->lq_info[i].win[j].success_ratio);
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2758
/* debugfs "rate_stats_table": read-only per-rate statistics dump */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = il4965_rs_sta_dbgfs_stats_table_read,
	.open = il4965_open_file_generic,
	.llseek = default_llseek,
};
2764
/* debugfs read handler for "rate_scale_data": report the current bit
 * rate for the active table. NOTE(review): last_txrate_idx is assumed
 * to be a valid idx into expected_tpt[]/il_rates[] here — verify it is
 * always clamped before this can run. */
static ssize_t
il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
					 char __user *user_buf, size_t count,
					 loff_t *ppos)
{
	char buff[120];
	int desc = 0;
	struct il_lq_sta *lq_sta = file->private_data;
	struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];

	if (is_Ht(tbl->lq_type))
		/* HT: report expected throughput for the current rate */
		desc +=
		    sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
			    tbl->expected_tpt[lq_sta->last_txrate_idx]);
	else
		/* Legacy: ieee field is in 500 kb/s units; >>1 gives Mb/s */
		desc +=
		    sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
			    il_rates[lq_sta->last_txrate_idx].ieee >> 1);

	return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
}
2786
/* debugfs "rate_scale_data": read-only current bit rate */
static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
	.read = il4965_rs_sta_dbgfs_rate_scale_data_read,
	.open = il4965_open_file_generic,
	.llseek = default_llseek,
};
2792
/* mac80211 add_sta_debugfs hook: create the per-station rate-scaling
 * debugfs entries under the station's directory. */
static void
il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
{
	struct il_lq_sta *lq_sta = il_sta;
	lq_sta->rs_sta_dbgfs_scale_table_file =
	    debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
				lq_sta, &rs_sta_dbgfs_scale_table_ops);
	lq_sta->rs_sta_dbgfs_stats_table_file =
	    debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta,
				&rs_sta_dbgfs_stats_table_ops);
	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
	    debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta,
				&rs_sta_dbgfs_rate_scale_data_ops);
	/* tx_agg_tid_en is a plain u8 bitmask; expose it directly */
	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
	    debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
			      &lq_sta->tx_agg_tid_en);

}
2811
/* mac80211 remove_sta_debugfs hook: tear down the entries created by
 * il4965_rs_add_debugfs() (debugfs_remove tolerates NULL/error dentries). */
static void
il4965_rs_remove_debugfs(void *il, void *il_sta)
{
	struct il_lq_sta *lq_sta = il_sta;
	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
2821#endif
2822
/*
 * Initialization of rate scaling information is done by driver after
 * the station is added. Since mac80211 calls this function before a
 * station is added we ignore it.
 */
static void
il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
			 struct ieee80211_sta *sta, void *il_sta)
{
}
2833
/* mac80211 rate-control algorithm registration table for the 4965 */
static struct rate_control_ops rs_4965_ops = {
	.module = NULL,
	.name = IL4965_RS_NAME,
	.tx_status = il4965_rs_tx_status,
	.get_rate = il4965_rs_get_rate,
	.rate_init = il4965_rs_rate_init_stub,
	.alloc = il4965_rs_alloc,
	.free = il4965_rs_free,
	.alloc_sta = il4965_rs_alloc_sta,
	.free_sta = il4965_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = il4965_rs_add_debugfs,
	.remove_sta_debugfs = il4965_rs_remove_debugfs,
#endif
};
2849
/* Register the 4965 rate-scaling algorithm with mac80211; returns 0 on
 * success or a negative errno from ieee80211_rate_control_register(). */
int
il4965_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_4965_ops);
}
2855
/* Unregister the 4965 rate-scaling algorithm from mac80211 */
void
il4965_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_4965_ops);
}
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
new file mode 100644
index 000000000000..84c54dccf195
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -0,0 +1,2421 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/sched.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "common.h"
41#include "4965.h"
42
43/**
44 * il_verify_inst_sparse - verify runtime uCode image in card vs. host,
45 * using sample data 100 bytes apart. If these sample points are good,
46 * it's a pretty good bet that everything between them is good, too.
47 */
48static int
49il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
50{
51 u32 val;
52 int ret = 0;
53 u32 errcnt = 0;
54 u32 i;
55
56 D_INFO("ucode inst image size is %u\n", len);
57
58 for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
59 /* read data comes through single port, auto-incr addr */
60 /* NOTE: Use the debugless read so we don't flood kernel log
61 * if IL_DL_IO is set */
62 il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND);
63 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
64 if (val != le32_to_cpu(*image)) {
65 ret = -EIO;
66 errcnt++;
67 if (errcnt >= 3)
68 break;
69 }
70 }
71
72 return ret;
73}
74
/**
 * il4965_verify_inst_full - verify runtime uCode image in card vs. host,
 * looking at all data.
 */
static int
il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
{
	u32 val;
	u32 save_len = len;	/* remember total for offset reporting */
	int ret = 0;
	u32 errcnt;

	D_INFO("ucode inst image size is %u\n", len);

	/* Set read address once; reads below auto-increment it */
	il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND);

	errcnt = 0;
	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IL_DL_IO is set */
		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IL_ERR("uCode INST section is invalid at "
			       "offset 0x%x, is 0x%x, s/b 0x%x\n",
			       save_len - len, val, le32_to_cpu(*image));
			ret = -EIO;
			errcnt++;
			/* cap log spam at 20 reported mismatches */
			if (errcnt >= 20)
				break;
		}
	}

	if (!errcnt)
		D_INFO("ucode image in INSTRUCTION memory is good\n");

	return ret;
}
113
/**
 * il4965_verify_ucode - determine which instruction image is in SRAM,
 * and verify its contents
 *
 * Tries each of the three host-side images (bootstrap, initialize,
 * runtime) with a sparse check; returns 0 on the first match. If none
 * match, does a full dump-style verification against the bootstrap
 * image so the log gives a clue, and returns its error.
 */
int
il4965_verify_ucode(struct il_priv *il)
{
	__le32 *image;
	u32 len;
	int ret;

	/* Try bootstrap */
	image = (__le32 *) il->ucode_boot.v_addr;
	len = il->ucode_boot.len;
	ret = il4965_verify_inst_sparse(il, image, len);
	if (!ret) {
		D_INFO("Bootstrap uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try initialize */
	image = (__le32 *) il->ucode_init.v_addr;
	len = il->ucode_init.len;
	ret = il4965_verify_inst_sparse(il, image, len);
	if (!ret) {
		D_INFO("Initialize uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try runtime/protocol */
	image = (__le32 *) il->ucode_code.v_addr;
	len = il->ucode_code.len;
	ret = il4965_verify_inst_sparse(il, image, len);
	if (!ret) {
		D_INFO("Runtime uCode is good in inst SRAM\n");
		return 0;
	}

	IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

	/* Since nothing seems to match, show first several data entries in
	 * instruction SRAM, so maybe visual inspection will give a clue.
	 * Selection of bootstrap image (vs. other images) is arbitrary. */
	image = (__le32 *) il->ucode_boot.v_addr;
	len = il->ucode_boot.len;
	ret = il4965_verify_inst_full(il, image, len);

	return ret;
}
163
164/******************************************************************************
165 *
166 * EEPROM related functions
167 *
168******************************************************************************/
169
170/*
171 * The device's EEPROM semaphore prevents conflicts between driver and uCode
172 * when accessing the EEPROM; each access is a series of pulses to/from the
173 * EEPROM chip, not a single event, so even reads could conflict if they
174 * weren't arbitrated by the semaphore.
175 */
176int
177il4965_eeprom_acquire_semaphore(struct il_priv *il)
178{
179 u16 count;
180 int ret;
181
182 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
183 /* Request semaphore */
184 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
185 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
186
187 /* See if we got it */
188 ret =
189 _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
190 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
191 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
192 EEPROM_SEM_TIMEOUT);
193 if (ret >= 0)
194 return ret;
195 }
196
197 return ret;
198}
199
/* Release the EEPROM semaphore acquired by
 * il4965_eeprom_acquire_semaphore() so the uCode may use the EEPROM. */
void
il4965_eeprom_release_semaphore(struct il_priv *il)
{
	il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
		     CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);

}
207
208int
209il4965_eeprom_check_version(struct il_priv *il)
210{
211 u16 eeprom_ver;
212 u16 calib_ver;
213
214 eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
215 calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET);
216
217 if (eeprom_ver < il->cfg->eeprom_ver ||
218 calib_ver < il->cfg->eeprom_calib_ver)
219 goto err;
220
221 IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver);
222
223 return 0;
224err:
225 IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x "
226 "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver,
227 calib_ver, il->cfg->eeprom_calib_ver);
228 return -EINVAL;
229
230}
231
/* Copy the ETH_ALEN-byte MAC address out of the cached EEPROM image. */
void
il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac)
{
	const u8 *addr = il_eeprom_query_addr(il,
					      EEPROM_MAC_ADDRESS);
	memcpy(mac, addr, ETH_ALEN);
}
239
/* Send led command to the firmware (asynchronous, no completion callback) */
static int
il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
{
	struct il_host_cmd cmd = {
		.id = C_LEDS,
		.len = sizeof(struct il_led_cmd),
		.data = led_cmd,
		.flags = CMD_ASYNC,
		.callback = NULL,
	};
	u32 reg;

	/* Clear any bits outside the BSM-control mask before handing LED
	 * control over to uCode. */
	reg = _il_rd(il, CSR_LED_REG);
	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
		_il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);

	return il_send_cmd(il, &cmd);
}
259
/* Turn the LED on by writing CSR_LED_REG_TRUN_ON to the LED register.
 * (Previous comment said "off", contradicting the value written and the
 * function name.) */
void
il4965_led_enable(struct il_priv *il)
{
	_il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
266
/* LED operations for the 4965: LED state changes go through the
 * asynchronous C_LEDS host command. */
const struct il_led_ops il4965_led_ops = {
	.cmd = il4965_send_led_cmd,
};
270
271static int il4965_send_tx_power(struct il_priv *il);
272static int il4965_hw_get_temperature(struct il_priv *il);
273
274/* Highest firmware API version supported */
275#define IL4965_UCODE_API_MAX 2
276
277/* Lowest firmware API version supported */
278#define IL4965_UCODE_API_MIN 2
279
280#define IL4965_FW_PRE "iwlwifi-4965-"
281#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode"
282#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api)
283
/* check contents of special bootstrap uCode SRAM */
static int
il4965_verify_bsm(struct il_priv *il)
{
	__le32 *image = il->ucode_boot.v_addr;
	u32 len = il->ucode_boot.len;
	u32 reg;
	u32 val;

	D_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this DWCOUNT read result is immediately overwritten
	 * in the loop below; looks like a leftover or a deliberate dummy
	 * read of the peripheral -- confirm before removing. */
	val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
	/* Compare BSM SRAM word-by-word against the host copy of the
	 * bootstrap image. */
	for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = il_rd_prph(il, reg);
		if (val != le32_to_cpu(*image)) {
			IL_ERR("BSM uCode verification failed at "
			       "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
			       BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
			       len, val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	D_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}
313
314/**
315 * il4965_load_bsm - Load bootstrap instructions
316 *
317 * BSM operation:
318 *
319 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
320 * in special SRAM that does not power down during RFKILL. When powering back
321 * up after power-saving sleeps (or during initial uCode load), the BSM loads
322 * the bootstrap program into the on-board processor, and starts it.
323 *
324 * The bootstrap program loads (via DMA) instructions and data for a new
325 * program from host DRAM locations indicated by the host driver in the
326 * BSM_DRAM_* registers. Once the new program is loaded, it starts
327 * automatically.
328 *
329 * When initializing the NIC, the host driver points the BSM to the
330 * "initialize" uCode image. This uCode sets up some internal data, then
331 * notifies host via "initialize alive" that it is complete.
332 *
333 * The host then replaces the BSM_DRAM_* pointer values to point to the
334 * normal runtime uCode instructions and a backup uCode data cache buffer
335 * (filled initially with starting data values for the on-board processor),
336 * then triggers the "initialize" uCode to load and launch the runtime uCode,
337 * which begins normal operation.
338 *
339 * When doing a power-save shutdown, runtime uCode saves data SRAM into
340 * the backup data cache in DRAM before SRAM is powered down.
341 *
342 * When powering back up, the BSM loads the bootstrap program. This reloads
343 * the runtime uCode instructions and the backup data cache into SRAM,
344 * and re-launches the runtime uCode from where it left off.
345 */
static int
il4965_load_bsm(struct il_priv *il)
{
	__le32 *image = il->ucode_boot.v_addr;
	u32 len = il->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	D_INFO("Begin load bsm\n");

	/* Bootstrap leads (via "initialize") to the runtime image; record
	 * that as the uCode type we are ultimately loading. */
	il->ucode_type = UCODE_RT;

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IL49_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE: il_init_alive_start() will replace these values,
	 * after the "initialize" uCode has run, to point to
	 * runtime/protocol instructions and backup data cache.
	 */
	pinst = il->ucode_init.p_addr >> 4;
	pdata = il->ucode_init_data.p_addr >> 4;
	inst_len = il->ucode_init.len;
	data_len = il->ucode_init_data.len;

	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_il_wr_prph(il, reg_offset, le32_to_cpu(*image));

	/* Read back and compare what was just written (paranoia check). */
	ret = il4965_verify_bsm(il);
	if (ret)
		return ret;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
	il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND);
	il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 * to prepare to load "initialize" uCode */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish; the START bit clears
	 * on completion. 100 iterations * 10us = 1ms maximum wait. */
	for (i = 0; i < 100; i++) {
		done = il_rd_prph(il, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		D_INFO("BSM write complete, poll %d iterations\n", i);
	else {
		IL_ERR("BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 * (e.g. when powering back up after power-save shutdown) */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	return 0;
}
423
424/**
425 * il4965_set_ucode_ptrs - Set uCode address location
426 *
427 * Tell initialization uCode where to find runtime uCode.
428 *
429 * BSM registers initially contain pointers to initialization uCode.
430 * We need to replace them to load runtime uCode inst and data,
431 * and to save runtime data when powering down.
432 */
433static int
434il4965_set_ucode_ptrs(struct il_priv *il)
435{
436 dma_addr_t pinst;
437 dma_addr_t pdata;
438 int ret = 0;
439
440 /* bits 35:4 for 4965 */
441 pinst = il->ucode_code.p_addr >> 4;
442 pdata = il->ucode_data_backup.p_addr >> 4;
443
444 /* Tell bootstrap uCode where to find image to load */
445 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
446 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
447 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
448
449 /* Inst byte count must be last to set up, bit 31 signals uCode
450 * that all new ptr/size info is in place */
451 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
452 il->ucode_code.len | BSM_DRAM_INST_LOAD);
453 D_INFO("Runtime uCode pointers are set.\n");
454
455 return ret;
456}
457
458/**
459 * il4965_init_alive_start - Called after N_ALIVE notification received
460 *
461 * Called after N_ALIVE notification received from "initialize" uCode.
462 *
463 * The 4965 "initialize" ALIVE reply contains calibration data for:
464 * Voltage, temperature, and MIMO tx gain correction, now stored in il
465 * (3945 does not contain this data).
466 *
467 * Tell "initialize" uCode to go ahead and load the runtime uCode.
468*/
static void
il4965_init_alive_start(struct il_priv *il)
{
	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded. */
	if (il4965_verify_ucode(il)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		D_INFO("Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	/* Calculate temperature */
	il->temperature = il4965_hw_get_temperature(il);

	/* Send pointers to protocol/runtime uCode image ... init code will
	 * load and launch runtime uCode, which will send us another "Alive"
	 * notification. */
	D_INFO("Initialization Alive received.\n");
	if (il4965_set_ucode_ptrs(il)) {
		/* Runtime instruction load won't happen;
		 * take it all the way back down so we can try again */
		D_INFO("Couldn't set up uCode pointers.\n");
		goto restart;
	}
	return;

restart:
	/* Schedule a full driver restart on the driver workqueue. */
	queue_work(il->workqueue, &il->restart);
}
500
501static bool
502iw4965_is_ht40_channel(__le32 rxon_flags)
503{
504 int chan_mod =
505 le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >>
506 RXON_FLG_CHANNEL_MODE_POS;
507 return (chan_mod == CHANNEL_MODE_PURE_40 ||
508 chan_mod == CHANNEL_MODE_MIXED);
509}
510
/* One-time NIC configuration: program radio-config bits from the EEPROM
 * and cache a pointer to the txpower calibration block. */
static void
il4965_nic_config(struct il_priv *il)
{
	unsigned long flags;
	u16 radio_cfg;

	spin_lock_irqsave(&il->lock, flags);

	radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	/* NOTE(review): bits are only written when the RF type field equals
	 * EEPROM_4965_RF_CFG_TYPE_MAX -- rationale not evident here; confirm
	 * against the 4965 EEPROM layout. */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			   EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			   EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		   CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	/* Cache the factory txpower calibration block from the EEPROM image;
	 * used later by il4965_fill_txpower_tbl(). */
	il->calib_info =
	    (struct il_eeprom_calib_info *)
	    il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&il->lock, flags);
}
539
540/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
541 * Called after every association, but this runs only once!
542 * ... once chain noise is calibrated the first time, it's good forever. */
static void
il4965_chain_noise_reset(struct il_priv *il)
{
	struct il_chain_noise_data *data = &(il->chain_noise_data);

	/* Only run while in the ALIVE state and actually associated;
	 * the state advances to ACCUMULATE below so this is one-shot. */
	if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) {
		struct il_calib_diff_gain_cmd cmd;

		/* clear data for chain noise calibration algorithm */
		data->chain_noise_a = 0;
		data->chain_noise_b = 0;
		data->chain_noise_c = 0;
		data->chain_signal_a = 0;
		data->chain_signal_b = 0;
		data->chain_signal_c = 0;
		data->beacon_count = 0;

		/* Zero differential Rx gains in the NIC so calibration
		 * starts from a known baseline. */
		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = 0;
		cmd.diff_gain_b = 0;
		cmd.diff_gain_c = 0;
		if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd))
			IL_ERR("Could not send C_PHY_CALIBRATION\n");
		data->state = IL_CHAIN_NOISE_ACCUMULATE;
		D_CALIB("Run chain_noise_calibrate\n");
	}
}
571
/* Default receiver-sensitivity calibration limits for the 4965; installed
 * into hw_params.sens by il4965_hw_set_hw_params(). */
static struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,	/* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
598
/* Set the critical-temperature (thermal kill) threshold, stored in Kelvin. */
static void
il4965_set_ct_threshold(struct il_priv *il)
{
	/* want Kelvin */
	il->hw_params.ct_kill_threshold =
	    CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
}
606
607/**
608 * il4965_hw_set_hw_params
609 *
610 * Called when initializing driver
611 */
static int
il4965_hw_set_hw_params(struct il_priv *il)
{
	/* Allow the num_of_queues module parameter to override the default,
	 * if within the valid range.
	 * NOTE(review): this writes back into il->cfg->base_params, i.e.
	 * mutates configuration that may be shared -- confirm cfg is
	 * per-device before relying on it. */
	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
		il->cfg->base_params->num_of_queues =
		    il->cfg->mod_params->num_of_queues;

	il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	il->hw_params.scd_bc_tbls_size =
	    il->cfg->base_params->num_of_queues *
	    sizeof(struct il4965_scd_bc_tbl);
	il->hw_params.tfd_size = sizeof(struct il_tfd);
	il->hw_params.max_stations = IL4965_STATION_COUNT;
	il->ctx.bcast_sta_id = IL4965_BROADCAST_ID;
	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* HT40 is only supported on the 5 GHz band by this hardware. */
	il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);

	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;

	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;

	il4965_set_ct_threshold(il);

	il->hw_params.sens = &il4965_sensitivity;
	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;

	return 0;
}
647
648static s32
649il4965_math_div_round(s32 num, s32 denom, s32 * res)
650{
651 s32 sign = 1;
652
653 if (num < 0) {
654 sign = -sign;
655 num = -num;
656 }
657 if (denom < 0) {
658 sign = -sign;
659 denom = -denom;
660 }
661 *res = 1;
662 *res = ((num * 2 + denom) / (denom * 2)) * sign;
663
664 return 1;
665}
666
667/**
668 * il4965_get_voltage_compensation - Power supply voltage comp for txpower
669 *
670 * Determines power supply voltage compensation for txpower calculations.
671 * Returns number of 1/2-dB steps to subtract from gain table idx,
672 * to compensate for difference between power supply voltage during
673 * factory measurements, vs. current power supply voltage.
674 *
675 * Voltage indication is higher for lower voltage.
676 * Lower voltage requires more gain (lower gain table idx).
677 */
678static s32
679il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage)
680{
681 s32 comp = 0;
682
683 if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage ||
684 TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage)
685 return 0;
686
687 il4965_math_div_round(current_voltage - eeprom_voltage,
688 TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp);
689
690 if (current_voltage > eeprom_voltage)
691 comp *= 2;
692 if ((comp < -2) || (comp > 2))
693 comp = 0;
694
695 return comp;
696}
697
698static s32
699il4965_get_tx_atten_grp(u16 channel)
700{
701 if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
702 channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
703 return CALIB_CH_GROUP_5;
704
705 if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
706 channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
707 return CALIB_CH_GROUP_1;
708
709 if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
710 channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
711 return CALIB_CH_GROUP_2;
712
713 if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
714 channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
715 return CALIB_CH_GROUP_3;
716
717 if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
718 channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
719 return CALIB_CH_GROUP_4;
720
721 return -EINVAL;
722}
723
/* Find the index of the EEPROM calibration band containing @channel.
 * Returns EEPROM_TX_POWER_BANDS when no band matches (callers check
 * ">= EEPROM_TX_POWER_BANDS" for the not-found case). */
static u32
il4965_get_sub_band(const struct il_priv *il, u32 channel)
{
	/* NOTE(review): initializer -1 is dead (the for loop resets b), and
	 * s32 b is returned through a u32 return type. Harmless as written,
	 * but worth confirming before refactoring. */
	s32 b = -1;

	for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
		/* ch_from == 0 marks an unused band entry. */
		if (il->calib_info->band_info[b].ch_from == 0)
			continue;

		if (channel >= il->calib_info->band_info[b].ch_from &&
		    channel <= il->calib_info->band_info[b].ch_to)
			break;
	}

	return b;
}
740
741static s32
742il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
743{
744 s32 val;
745
746 if (x2 == x1)
747 return y1;
748 else {
749 il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
750 return val + y2;
751 }
752}
753
754/**
755 * il4965_interpolate_chan - Interpolate factory measurements for one channel
756 *
757 * Interpolates factory measurements from the two sample channels within a
758 * sub-band, to apply to channel of interest. Interpolation is proportional to
759 * differences in channel frequencies, which is proportional to differences
760 * in channel number.
761 */
static int
il4965_interpolate_chan(struct il_priv *il, u32 channel,
			struct il_eeprom_calib_ch_info *chan_info)
{
	s32 s = -1;
	u32 c;
	u32 m;
	const struct il_eeprom_calib_measure *m1;
	const struct il_eeprom_calib_measure *m2;
	struct il_eeprom_calib_measure *omeas;
	u32 ch_i1;
	u32 ch_i2;

	/* il4965_get_sub_band() returns EEPROM_TX_POWER_BANDS on failure. */
	s = il4965_get_sub_band(il, channel);
	if (s >= EEPROM_TX_POWER_BANDS) {
		IL_ERR("Tx Power can not find channel %d\n", channel);
		return -1;
	}

	/* The two factory-calibrated sample channels bounding this band. */
	ch_i1 = il->calib_info->band_info[s].ch1.ch_num;
	ch_i2 = il->calib_info->band_info[s].ch2.ch_num;
	chan_info->ch_num = (u8) channel;

	D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s,
		  ch_i1, ch_i2);

	/* Interpolate each measurement for every Tx chain, proportional to
	 * channel-number distance from the two sample channels. */
	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
			m1 = &(il->calib_info->band_info[s].ch1.
			       measurements[c][m]);
			m2 = &(il->calib_info->band_info[s].ch2.
			       measurements[c][m]);
			omeas = &(chan_info->measurements[c][m]);

			omeas->actual_pow =
			    (u8) il4965_interpolate_value(channel, ch_i1,
							  m1->actual_pow, ch_i2,
							  m2->actual_pow);
			omeas->gain_idx =
			    (u8) il4965_interpolate_value(channel, ch_i1,
							  m1->gain_idx, ch_i2,
							  m2->gain_idx);
			omeas->temperature =
			    (u8) il4965_interpolate_value(channel, ch_i1,
							  m1->temperature,
							  ch_i2,
							  m2->temperature);
			omeas->pa_det =
			    (s8) il4965_interpolate_value(channel, ch_i1,
							  m1->pa_det, ch_i2,
							  m2->pa_det);

			D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c,
				  m, m1->actual_pow, m2->actual_pow,
				  omeas->actual_pow);
			D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c,
				  m, m1->gain_idx, m2->gain_idx,
				  omeas->gain_idx);
			D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c,
				  m, m1->pa_det, m2->pa_det, omeas->pa_det);
			D_TXPOWER("chain %d meas %d T1=%d T2=%d T=%d\n", c,
				  m, m1->temperature, m2->temperature,
				  omeas->temperature);
		}
	}

	return 0;
}
830
/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
 * Indexed by the same rate idx as the txpower table entries built in
 * il4965_fill_txpower_tbl(); the final entry covers CCK. */
static s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};
840
841/* Thermal compensation values for txpower for various frequency ranges ...
842 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
843static struct il4965_txpower_comp_entry {
844 s32 degrees_per_05db_a;
845 s32 degrees_per_05db_a_denom;
846} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
847 {
848 9, 2}, /* group 0 5.2, ch 34-43 */
849 {
850 4, 1}, /* group 1 5.2, ch 44-70 */
851 {
852 4, 1}, /* group 2 5.2, ch 71-124 */
853 {
854 4, 1}, /* group 3 5.2, ch 125-200 */
855 {
856 3, 1} /* group 4 2.4, ch all */
857};
858
859static s32
860get_min_power_idx(s32 rate_power_idx, u32 band)
861{
862 if (!band) {
863 if ((rate_power_idx & 7) <= 4)
864 return MIN_TX_GAIN_IDX_52GHZ_EXT;
865 }
866 return MIN_TX_GAIN_IDX;
867}
868
/* One txpower gain setting: DSP pre-distortion attenuation plus radio
 * gain-control value (see il4965_fill_txpower_tbl()). */
struct gain_entry {
	u8 dsp;
	u8 radio;
};

/* Gain settings per band, indexed by power_idx (0 = highest txpower);
 * 108 entries each. Dips in the radio value (e.g. 0x22 -> 0x18) follow
 * the hardware's gain-control coding. */
static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain idx table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain idx table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};
1098
1099static int
1100il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
1101 u8 ctrl_chan_high,
1102 struct il4965_tx_power_db *tx_power_tbl)
1103{
1104 u8 saturation_power;
1105 s32 target_power;
1106 s32 user_target_power;
1107 s32 power_limit;
1108 s32 current_temp;
1109 s32 reg_limit;
1110 s32 current_regulatory;
1111 s32 txatten_grp = CALIB_CH_GROUP_MAX;
1112 int i;
1113 int c;
1114 const struct il_channel_info *ch_info = NULL;
1115 struct il_eeprom_calib_ch_info ch_eeprom_info;
1116 const struct il_eeprom_calib_measure *measurement;
1117 s16 voltage;
1118 s32 init_voltage;
1119 s32 voltage_compensation;
1120 s32 degrees_per_05db_num;
1121 s32 degrees_per_05db_denom;
1122 s32 factory_temp;
1123 s32 temperature_comp[2];
1124 s32 factory_gain_idx[2];
1125 s32 factory_actual_pwr[2];
1126 s32 power_idx;
1127
1128 /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
1129 * are used for idxing into txpower table) */
1130 user_target_power = 2 * il->tx_power_user_lmt;
1131
1132 /* Get current (RXON) channel, band, width */
1133 D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40);
1134
1135 ch_info = il_get_channel_info(il, il->band, channel);
1136
1137 if (!il_is_channel_valid(ch_info))
1138 return -EINVAL;
1139
1140 /* get txatten group, used to select 1) thermal txpower adjustment
1141 * and 2) mimo txpower balance between Tx chains. */
1142 txatten_grp = il4965_get_tx_atten_grp(channel);
1143 if (txatten_grp < 0) {
1144 IL_ERR("Can't find txatten group for channel %d.\n", channel);
1145 return txatten_grp;
1146 }
1147
1148 D_TXPOWER("channel %d belongs to txatten group %d\n", channel,
1149 txatten_grp);
1150
1151 if (is_ht40) {
1152 if (ctrl_chan_high)
1153 channel -= 2;
1154 else
1155 channel += 2;
1156 }
1157
1158 /* hardware txpower limits ...
1159 * saturation (clipping distortion) txpowers are in half-dBm */
1160 if (band)
1161 saturation_power = il->calib_info->saturation_power24;
1162 else
1163 saturation_power = il->calib_info->saturation_power52;
1164
1165 if (saturation_power < IL_TX_POWER_SATURATION_MIN ||
1166 saturation_power > IL_TX_POWER_SATURATION_MAX) {
1167 if (band)
1168 saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24;
1169 else
1170 saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52;
1171 }
1172
1173 /* regulatory txpower limits ... reg_limit values are in half-dBm,
1174 * max_power_avg values are in dBm, convert * 2 */
1175 if (is_ht40)
1176 reg_limit = ch_info->ht40_max_power_avg * 2;
1177 else
1178 reg_limit = ch_info->max_power_avg * 2;
1179
1180 if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) ||
1181 (reg_limit > IL_TX_POWER_REGULATORY_MAX)) {
1182 if (band)
1183 reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24;
1184 else
1185 reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52;
1186 }
1187
1188 /* Interpolate txpower calibration values for this channel,
1189 * based on factory calibration tests on spaced channels. */
1190 il4965_interpolate_chan(il, channel, &ch_eeprom_info);
1191
1192 /* calculate tx gain adjustment based on power supply voltage */
1193 voltage = le16_to_cpu(il->calib_info->voltage);
1194 init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage);
1195 voltage_compensation =
1196 il4965_get_voltage_compensation(voltage, init_voltage);
1197
1198 D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage,
1199 voltage, voltage_compensation);
1200
1201 /* get current temperature (Celsius) */
1202 current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
1203 current_temp = min(il->temperature, IL_TX_POWER_TEMPERATURE_MAX);
1204 current_temp = KELVIN_TO_CELSIUS(current_temp);
1205
1206 /* select thermal txpower adjustment params, based on channel group
1207 * (same frequency group used for mimo txatten adjustment) */
1208 degrees_per_05db_num =
1209 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
1210 degrees_per_05db_denom =
1211 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
1212
1213 /* get per-chain txpower values from factory measurements */
1214 for (c = 0; c < 2; c++) {
1215 measurement = &ch_eeprom_info.measurements[c][1];
1216
1217 /* txgain adjustment (in half-dB steps) based on difference
1218 * between factory and current temperature */
1219 factory_temp = measurement->temperature;
1220 il4965_math_div_round((current_temp -
1221 factory_temp) * degrees_per_05db_denom,
1222 degrees_per_05db_num,
1223 &temperature_comp[c]);
1224
1225 factory_gain_idx[c] = measurement->gain_idx;
1226 factory_actual_pwr[c] = measurement->actual_pow;
1227
1228 D_TXPOWER("chain = %d\n", c);
1229 D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n",
1230 factory_temp, current_temp, temperature_comp[c]);
1231
1232 D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c],
1233 factory_actual_pwr[c]);
1234 }
1235
1236 /* for each of 33 bit-rates (including 1 for CCK) */
1237 for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) {
1238 u8 is_mimo_rate;
1239 union il4965_tx_power_dual_stream tx_power;
1240
1241 /* for mimo, reduce each chain's txpower by half
1242 * (3dB, 6 steps), so total output power is regulatory
1243 * compliant. */
1244 if (i & 0x8) {
1245 current_regulatory =
1246 reg_limit -
1247 IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1248 is_mimo_rate = 1;
1249 } else {
1250 current_regulatory = reg_limit;
1251 is_mimo_rate = 0;
1252 }
1253
1254 /* find txpower limit, either hardware or regulatory */
1255 power_limit = saturation_power - back_off_table[i];
1256 if (power_limit > current_regulatory)
1257 power_limit = current_regulatory;
1258
1259 /* reduce user's txpower request if necessary
1260 * for this rate on this channel */
1261 target_power = user_target_power;
1262 if (target_power > power_limit)
1263 target_power = power_limit;
1264
1265 D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i,
1266 saturation_power - back_off_table[i],
1267 current_regulatory, user_target_power, target_power);
1268
1269 /* for each of 2 Tx chains (radio transmitters) */
1270 for (c = 0; c < 2; c++) {
1271 s32 atten_value;
1272
1273 if (is_mimo_rate)
1274 atten_value =
1275 (s32) le32_to_cpu(il->card_alive_init.
1276 tx_atten[txatten_grp][c]);
1277 else
1278 atten_value = 0;
1279
1280 /* calculate idx; higher idx means lower txpower */
1281 power_idx =
1282 (u8) (factory_gain_idx[c] -
1283 (target_power - factory_actual_pwr[c]) -
1284 temperature_comp[c] - voltage_compensation +
1285 atten_value);
1286
1287/* D_TXPOWER("calculated txpower idx %d\n",
1288 power_idx); */
1289
1290 if (power_idx < get_min_power_idx(i, band))
1291 power_idx = get_min_power_idx(i, band);
1292
1293 /* adjust 5 GHz idx to support negative idxes */
1294 if (!band)
1295 power_idx += 9;
1296
1297 /* CCK, rate 32, reduce txpower for CCK */
1298 if (i == POWER_TBL_CCK_ENTRY)
1299 power_idx +=
1300 IL_TX_POWER_CCK_COMPENSATION_C_STEP;
1301
1302 /* stay within the table! */
1303 if (power_idx > 107) {
1304 IL_WARN("txpower idx %d > 107\n", power_idx);
1305 power_idx = 107;
1306 }
1307 if (power_idx < 0) {
1308 IL_WARN("txpower idx %d < 0\n", power_idx);
1309 power_idx = 0;
1310 }
1311
1312 /* fill txpower command for this rate/chain */
1313 tx_power.s.radio_tx_gain[c] =
1314 gain_table[band][power_idx].radio;
1315 tx_power.s.dsp_predis_atten[c] =
1316 gain_table[band][power_idx].dsp;
1317
1318 D_TXPOWER("chain %d mimo %d idx %d "
1319 "gain 0x%02x dsp %d\n", c, atten_value,
1320 power_idx, tx_power.s.radio_tx_gain[c],
1321 tx_power.s.dsp_predis_atten[c]);
1322 } /* for each chain */
1323
1324 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1325
1326 } /* for each rate */
1327
1328 return 0;
1329}
1330
1331/**
1332 * il4965_send_tx_power - Configure the TXPOWER level user limit
1333 *
1334 * Uses the active RXON for channel, band, and characteristics (ht40, high)
1335 * The power limit is taken from il->tx_power_user_lmt.
1336 */
1337static int
1338il4965_send_tx_power(struct il_priv *il)
1339{
1340 struct il4965_txpowertable_cmd cmd = { 0 };
1341 int ret;
1342 u8 band = 0;
1343 bool is_ht40 = false;
1344 u8 ctrl_chan_high = 0;
1345 struct il_rxon_context *ctx = &il->ctx;
1346
1347 if (WARN_ONCE
1348 (test_bit(S_SCAN_HW, &il->status),
1349 "TX Power requested while scanning!\n"))
1350 return -EAGAIN;
1351
1352 band = il->band == IEEE80211_BAND_2GHZ;
1353
1354 is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
1355
1356 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1357 ctrl_chan_high = 1;
1358
1359 cmd.band = band;
1360 cmd.channel = ctx->active.channel;
1361
1362 ret =
1363 il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel),
1364 is_ht40, ctrl_chan_high, &cmd.tx_power);
1365 if (ret)
1366 goto out;
1367
1368 ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);
1369
1370out:
1371 return ret;
1372}
1373
1374static int
1375il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
1376{
1377 int ret = 0;
1378 struct il4965_rxon_assoc_cmd rxon_assoc;
1379 const struct il_rxon_cmd *rxon1 = &ctx->staging;
1380 const struct il_rxon_cmd *rxon2 = &ctx->active;
1381
1382 if (rxon1->flags == rxon2->flags &&
1383 rxon1->filter_flags == rxon2->filter_flags &&
1384 rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
1385 rxon1->ofdm_ht_single_stream_basic_rates ==
1386 rxon2->ofdm_ht_single_stream_basic_rates &&
1387 rxon1->ofdm_ht_dual_stream_basic_rates ==
1388 rxon2->ofdm_ht_dual_stream_basic_rates &&
1389 rxon1->rx_chain == rxon2->rx_chain &&
1390 rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
1391 D_INFO("Using current RXON_ASSOC. Not resending.\n");
1392 return 0;
1393 }
1394
1395 rxon_assoc.flags = ctx->staging.flags;
1396 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1397 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1398 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1399 rxon_assoc.reserved = 0;
1400 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1401 ctx->staging.ofdm_ht_single_stream_basic_rates;
1402 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1403 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1404 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
1405
1406 ret =
1407 il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
1408 &rxon_assoc, NULL);
1409
1410 return ret;
1411}
1412
/*
 * il4965_commit_rxon - push the staging RXON configuration to the device
 *
 * Chooses between a lightweight RXON_ASSOC update and a full RXON command,
 * handling the required unassoc/assoc two-step, station-table restoration
 * and the mandatory TXPOWER re-send.  The exact ordering of commands here
 * mirrors uCode requirements; do not reorder.
 */
static int
il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct il_rxon_cmd *active_rxon = (void *)&ctx->active;
	int ret;
	bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);

	if (!il_is_alive(il))
		return -EBUSY;

	if (!ctx->is_active)
		return 0;

	/* always get timestamp with Rx frame */
	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;

	ret = il_check_rxon_cmd(il, ctx);
	if (ret) {
		IL_ERR("Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/*
	 * receive commit_rxon request
	 * abort any previous channel switch if still in process
	 */
	if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
	    il->switch_channel != ctx->staging.channel) {
		D_11H("abort channel switch on %d\n",
		      le16_to_cpu(il->switch_channel));
		il_chswitch_done(il, false);
	}

	/* If we don't need to send a full RXON, we can use
	 * il_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!il_full_rxon_required(il, ctx)) {
		ret = il_send_rxon_assoc(il, ctx);
		if (ret) {
			IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		/* staging is now in effect; mirror it into active */
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		il_print_rx_config_cmd(il, ctx);
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if tx power changed.
		 */
		il_set_tx_power(il, il->tx_power_next, false);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (il_is_associated_ctx(ctx) && new_assoc) {
		D_INFO("Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		ret =
		    il_send_cmd_pdu(il, ctx->rxon_cmd,
				    sizeof(struct il_rxon_cmd), active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (ret) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
			return ret;
		}
		/* unassoc RXON wiped the uCode station table; rebuild it */
		il_clear_ucode_stations(il, ctx);
		il_restore_stations(il, ctx);
		ret = il4965_restore_default_wep_keys(il, ctx);
		if (ret) {
			IL_ERR("Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}

	D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
	       "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
	       le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr);

	il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto);

	/* Apply the new configuration
	 * RXON unassoc clears the station table in uCode so restoration of
	 * stations is needed after it (the RXON command) completes
	 */
	if (!new_assoc) {
		ret =
		    il_send_cmd_pdu(il, ctx->rxon_cmd,
				    sizeof(struct il_rxon_cmd), &ctx->staging);
		if (ret) {
			IL_ERR("Error setting new RXON (%d)\n", ret);
			return ret;
		}
		D_INFO("Return from !new_assoc RXON.\n");
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		il_clear_ucode_stations(il, ctx);
		il_restore_stations(il, ctx);
		ret = il4965_restore_default_wep_keys(il, ctx);
		if (ret) {
			IL_ERR("Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}
	if (new_assoc) {
		il->start_calib = 0;
		/* Apply the new configuration
		 * RXON assoc doesn't clear the station table in uCode,
		 */
		ret =
		    il_send_cmd_pdu(il, ctx->rxon_cmd,
				    sizeof(struct il_rxon_cmd), &ctx->staging);
		if (ret) {
			IL_ERR("Error setting new RXON (%d)\n", ret);
			return ret;
		}
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
	}
	il_print_rx_config_cmd(il, ctx);

	/* sensitivity calibration must restart on the new channel */
	il4965_init_sensitivity(il);

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	ret = il_set_tx_power(il, il->tx_power_next, true);
	if (ret) {
		IL_ERR("Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}
1551
1552static int
1553il4965_hw_channel_switch(struct il_priv *il,
1554 struct ieee80211_channel_switch *ch_switch)
1555{
1556 struct il_rxon_context *ctx = &il->ctx;
1557 int rc;
1558 u8 band = 0;
1559 bool is_ht40 = false;
1560 u8 ctrl_chan_high = 0;
1561 struct il4965_channel_switch_cmd cmd;
1562 const struct il_channel_info *ch_info;
1563 u32 switch_time_in_usec, ucode_switch_time;
1564 u16 ch;
1565 u32 tsf_low;
1566 u8 switch_count;
1567 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
1568 struct ieee80211_vif *vif = ctx->vif;
1569 band = il->band == IEEE80211_BAND_2GHZ;
1570
1571 is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
1572
1573 if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1574 ctrl_chan_high = 1;
1575
1576 cmd.band = band;
1577 cmd.expect_beacon = 0;
1578 ch = ch_switch->channel->hw_value;
1579 cmd.channel = cpu_to_le16(ch);
1580 cmd.rxon_flags = ctx->staging.flags;
1581 cmd.rxon_filter_flags = ctx->staging.filter_flags;
1582 switch_count = ch_switch->count;
1583 tsf_low = ch_switch->timestamp & 0x0ffffffff;
1584 /*
1585 * calculate the ucode channel switch time
1586 * adding TSF as one of the factor for when to switch
1587 */
1588 if (il->ucode_beacon_time > tsf_low && beacon_interval) {
1589 if (switch_count >
1590 ((il->ucode_beacon_time - tsf_low) / beacon_interval)) {
1591 switch_count -=
1592 (il->ucode_beacon_time - tsf_low) / beacon_interval;
1593 } else
1594 switch_count = 0;
1595 }
1596 if (switch_count <= 1)
1597 cmd.switch_time = cpu_to_le32(il->ucode_beacon_time);
1598 else {
1599 switch_time_in_usec =
1600 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
1601 ucode_switch_time =
1602 il_usecs_to_beacons(il, switch_time_in_usec,
1603 beacon_interval);
1604 cmd.switch_time =
1605 il_add_beacon_time(il, il->ucode_beacon_time,
1606 ucode_switch_time, beacon_interval);
1607 }
1608 D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time);
1609 ch_info = il_get_channel_info(il, il->band, ch);
1610 if (ch_info)
1611 cmd.expect_beacon = il_is_channel_radar(ch_info);
1612 else {
1613 IL_ERR("invalid channel switch from %u to %u\n",
1614 ctx->active.channel, ch);
1615 return -EFAULT;
1616 }
1617
1618 rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high,
1619 &cmd.tx_power);
1620 if (rc) {
1621 D_11H("error:%d fill txpower_tbl\n", rc);
1622 return rc;
1623 }
1624
1625 return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1626}
1627
1628/**
1629 * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1630 */
1631static void
1632il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
1633 u16 byte_cnt)
1634{
1635 struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr;
1636 int txq_id = txq->q.id;
1637 int write_ptr = txq->q.write_ptr;
1638 int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE;
1639 __le16 bc_ent;
1640
1641 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
1642
1643 bc_ent = cpu_to_le16(len & 0xFFF);
1644 /* Set up byte count within first 256 entries */
1645 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1646
1647 /* If within first 64 entries, duplicate at end */
1648 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1649 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1650 bc_ent;
1651}
1652
1653/**
1654 * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
1655 * @stats: Provides the temperature reading from the uCode
1656 *
1657 * A return of <0 indicates bogus data in the stats
1658 */
1659static int
1660il4965_hw_get_temperature(struct il_priv *il)
1661{
1662 s32 temperature;
1663 s32 vt;
1664 s32 R1, R2, R3;
1665 u32 R4;
1666
1667 if (test_bit(S_TEMPERATURE, &il->status) &&
1668 (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) {
1669 D_TEMP("Running HT40 temperature calibration\n");
1670 R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]);
1671 R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]);
1672 R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]);
1673 R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
1674 } else {
1675 D_TEMP("Running temperature calibration\n");
1676 R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]);
1677 R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]);
1678 R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]);
1679 R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
1680 }
1681
1682 /*
1683 * Temperature is only 23 bits, so sign extend out to 32.
1684 *
1685 * NOTE If we haven't received a stats notification yet
1686 * with an updated temperature, use R4 provided to us in the
1687 * "initialize" ALIVE response.
1688 */
1689 if (!test_bit(S_TEMPERATURE, &il->status))
1690 vt = sign_extend32(R4, 23);
1691 else
1692 vt = sign_extend32(le32_to_cpu
1693 (il->_4965.stats.general.common.temperature),
1694 23);
1695
1696 D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
1697
1698 if (R3 == R1) {
1699 IL_ERR("Calibration conflict R1 == R3\n");
1700 return -1;
1701 }
1702
1703 /* Calculate temperature in degrees Kelvin, adjust by 97%.
1704 * Add offset to center the adjustment around 0 degrees Centigrade. */
1705 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1706 temperature /= (R3 - R1);
1707 temperature =
1708 (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
1709
1710 D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
1711 KELVIN_TO_CELSIUS(temperature));
1712
1713 return temperature;
1714}
1715
1716/* Adjust Txpower only if temperature variance is greater than threshold. */
1717#define IL_TEMPERATURE_THRESHOLD 3
1718
1719/**
1720 * il4965_is_temp_calib_needed - determines if new calibration is needed
1721 *
1722 * If the temperature changed has changed sufficiently, then a recalibration
1723 * is needed.
1724 *
1725 * Assumes caller will replace il->last_temperature once calibration
1726 * executed.
1727 */
1728static int
1729il4965_is_temp_calib_needed(struct il_priv *il)
1730{
1731 int temp_diff;
1732
1733 if (!test_bit(S_STATS, &il->status)) {
1734 D_TEMP("Temperature not updated -- no stats.\n");
1735 return 0;
1736 }
1737
1738 temp_diff = il->temperature - il->last_temperature;
1739
1740 /* get absolute value */
1741 if (temp_diff < 0) {
1742 D_POWER("Getting cooler, delta %d\n", temp_diff);
1743 temp_diff = -temp_diff;
1744 } else if (temp_diff == 0)
1745 D_POWER("Temperature unchanged\n");
1746 else
1747 D_POWER("Getting warmer, delta %d\n", temp_diff);
1748
1749 if (temp_diff < IL_TEMPERATURE_THRESHOLD) {
1750 D_POWER(" => thermal txpower calib not needed\n");
1751 return 0;
1752 }
1753
1754 D_POWER(" => thermal txpower calib needed\n");
1755
1756 return 1;
1757}
1758
1759static void
1760il4965_temperature_calib(struct il_priv *il)
1761{
1762 s32 temp;
1763
1764 temp = il4965_hw_get_temperature(il);
1765 if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
1766 return;
1767
1768 if (il->temperature != temp) {
1769 if (il->temperature)
1770 D_TEMP("Temperature changed " "from %dC to %dC\n",
1771 KELVIN_TO_CELSIUS(il->temperature),
1772 KELVIN_TO_CELSIUS(temp));
1773 else
1774 D_TEMP("Temperature " "initialized to %dC\n",
1775 KELVIN_TO_CELSIUS(temp));
1776 }
1777
1778 il->temperature = temp;
1779 set_bit(S_TEMPERATURE, &il->status);
1780
1781 if (!il->disable_tx_power_cal &&
1782 unlikely(!test_bit(S_SCANNING, &il->status)) &&
1783 il4965_is_temp_calib_needed(il))
1784 queue_work(il->workqueue, &il->txpower_work);
1785}
1786
1787static u16
1788il4965_get_hcmd_size(u8 cmd_id, u16 len)
1789{
1790 switch (cmd_id) {
1791 case C_RXON:
1792 return (u16) sizeof(struct il4965_rxon_cmd);
1793 default:
1794 return len;
1795 }
1796}
1797
1798static u16
1799il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
1800{
1801 struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data;
1802 addsta->mode = cmd->mode;
1803 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
1804 memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
1805 addsta->station_flags = cmd->station_flags;
1806 addsta->station_flags_msk = cmd->station_flags_msk;
1807 addsta->tid_disable_tx = cmd->tid_disable_tx;
1808 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1809 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1810 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
1811 addsta->sleep_tx_count = cmd->sleep_tx_count;
1812 addsta->reserved1 = cpu_to_le16(0);
1813 addsta->reserved2 = cpu_to_le16(0);
1814
1815 return (u16) sizeof(struct il4965_addsta_cmd);
1816}
1817
1818static inline u32
1819il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
1820{
1821 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
1822}
1823
1824static inline u32
1825il4965_tx_status_to_mac80211(u32 status)
1826{
1827 status &= TX_STATUS_MSK;
1828
1829 switch (status) {
1830 case TX_STATUS_SUCCESS:
1831 case TX_STATUS_DIRECT_DONE:
1832 return IEEE80211_TX_STAT_ACK;
1833 case TX_STATUS_FAIL_DEST_PS:
1834 return IEEE80211_TX_STAT_TX_FILTERED;
1835 default:
1836 return 0;
1837 }
1838}
1839
1840static inline bool
1841il4965_is_tx_success(u32 status)
1842{
1843 status &= TX_STATUS_MSK;
1844 return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
1845}
1846
1847/**
1848 * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
1849 */
1850static int
1851il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
1852 struct il4965_tx_resp *tx_resp, int txq_id,
1853 u16 start_idx)
1854{
1855 u16 status;
1856 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
1857 struct ieee80211_tx_info *info = NULL;
1858 struct ieee80211_hdr *hdr = NULL;
1859 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
1860 int i, sh, idx;
1861 u16 seq;
1862 if (agg->wait_for_ba)
1863 D_TX_REPLY("got tx response w/o block-ack\n");
1864
1865 agg->frame_count = tx_resp->frame_count;
1866 agg->start_idx = start_idx;
1867 agg->rate_n_flags = rate_n_flags;
1868 agg->bitmap = 0;
1869
1870 /* num frames attempted by Tx command */
1871 if (agg->frame_count == 1) {
1872 /* Only one frame was attempted; no block-ack will arrive */
1873 status = le16_to_cpu(frame_status[0].status);
1874 idx = start_idx;
1875
1876 D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
1877 agg->frame_count, agg->start_idx, idx);
1878
1879 info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb);
1880 info->status.rates[0].count = tx_resp->failure_frame + 1;
1881 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1882 info->flags |= il4965_tx_status_to_mac80211(status);
1883 il4965_hwrate_to_tx_control(il, rate_n_flags, info);
1884
1885 D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
1886 tx_resp->failure_frame);
1887 D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
1888
1889 agg->wait_for_ba = 0;
1890 } else {
1891 /* Two or more frames were attempted; expect block-ack */
1892 u64 bitmap = 0;
1893 int start = agg->start_idx;
1894
1895 /* Construct bit-map of pending frames within Tx win */
1896 for (i = 0; i < agg->frame_count; i++) {
1897 u16 sc;
1898 status = le16_to_cpu(frame_status[i].status);
1899 seq = le16_to_cpu(frame_status[i].sequence);
1900 idx = SEQ_TO_IDX(seq);
1901 txq_id = SEQ_TO_QUEUE(seq);
1902
1903 if (status &
1904 (AGG_TX_STATE_FEW_BYTES_MSK |
1905 AGG_TX_STATE_ABORT_MSK))
1906 continue;
1907
1908 D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
1909 agg->frame_count, txq_id, idx);
1910
1911 hdr = il_tx_queue_get_hdr(il, txq_id, idx);
1912 if (!hdr) {
1913 IL_ERR("BUG_ON idx doesn't point to valid skb"
1914 " idx=%d, txq_id=%d\n", idx, txq_id);
1915 return -1;
1916 }
1917
1918 sc = le16_to_cpu(hdr->seq_ctrl);
1919 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
1920 IL_ERR("BUG_ON idx doesn't match seq control"
1921 " idx=%d, seq_idx=%d, seq=%d\n", idx,
1922 SEQ_TO_SN(sc), hdr->seq_ctrl);
1923 return -1;
1924 }
1925
1926 D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
1927 SEQ_TO_SN(sc));
1928
1929 sh = idx - start;
1930 if (sh > 64) {
1931 sh = (start - idx) + 0xff;
1932 bitmap = bitmap << sh;
1933 sh = 0;
1934 start = idx;
1935 } else if (sh < -64)
1936 sh = 0xff - (start - idx);
1937 else if (sh < 0) {
1938 sh = start - idx;
1939 start = idx;
1940 bitmap = bitmap << sh;
1941 sh = 0;
1942 }
1943 bitmap |= 1ULL << sh;
1944 D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
1945 (unsigned long long)bitmap);
1946 }
1947
1948 agg->bitmap = bitmap;
1949 agg->start_idx = start;
1950 D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
1951 agg->frame_count, agg->start_idx,
1952 (unsigned long long)agg->bitmap);
1953
1954 if (bitmap)
1955 agg->wait_for_ba = 1;
1956 }
1957 return 0;
1958}
1959
1960static u8
1961il4965_find_station(struct il_priv *il, const u8 * addr)
1962{
1963 int i;
1964 int start = 0;
1965 int ret = IL_INVALID_STATION;
1966 unsigned long flags;
1967
1968 if ((il->iw_mode == NL80211_IFTYPE_ADHOC))
1969 start = IL_STA_ID;
1970
1971 if (is_broadcast_ether_addr(addr))
1972 return il->ctx.bcast_sta_id;
1973
1974 spin_lock_irqsave(&il->sta_lock, flags);
1975 for (i = start; i < il->hw_params.max_stations; i++)
1976 if (il->stations[i].used &&
1977 (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
1978 ret = i;
1979 goto out;
1980 }
1981
1982 D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
1983
1984out:
1985 /*
1986 * It may be possible that more commands interacting with stations
1987 * arrive before we completed processing the adding of
1988 * station
1989 */
1990 if (ret != IL_INVALID_STATION &&
1991 (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
1992 ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
1993 (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
1994 IL_ERR("Requested station info for sta %d before ready.\n",
1995 ret);
1996 ret = IL_INVALID_STATION;
1997 }
1998 spin_unlock_irqrestore(&il->sta_lock, flags);
1999 return ret;
2000}
2001
2002static int
2003il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
2004{
2005 if (il->iw_mode == NL80211_IFTYPE_STATION) {
2006 return IL_AP_ID;
2007 } else {
2008 u8 *da = ieee80211_get_DA(hdr);
2009 return il4965_find_station(il, da);
2010 }
2011}
2012
2013/**
2014 * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
2015 */
2016static void
2017il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
2018{
2019 struct il_rx_pkt *pkt = rxb_addr(rxb);
2020 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2021 int txq_id = SEQ_TO_QUEUE(sequence);
2022 int idx = SEQ_TO_IDX(sequence);
2023 struct il_tx_queue *txq = &il->txq[txq_id];
2024 struct ieee80211_hdr *hdr;
2025 struct ieee80211_tx_info *info;
2026 struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
2027 u32 status = le32_to_cpu(tx_resp->u.status);
2028 int uninitialized_var(tid);
2029 int sta_id;
2030 int freed;
2031 u8 *qc = NULL;
2032 unsigned long flags;
2033
2034 if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
2035 IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
2036 "is out of range [0-%d] %d %d\n", txq_id, idx,
2037 txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
2038 return;
2039 }
2040
2041 txq->time_stamp = jiffies;
2042 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
2043 memset(&info->status, 0, sizeof(info->status));
2044
2045 hdr = il_tx_queue_get_hdr(il, txq_id, idx);
2046 if (ieee80211_is_data_qos(hdr->frame_control)) {
2047 qc = ieee80211_get_qos_ctl(hdr);
2048 tid = qc[0] & 0xf;
2049 }
2050
2051 sta_id = il4965_get_ra_sta_id(il, hdr);
2052 if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
2053 IL_ERR("Station not known\n");
2054 return;
2055 }
2056
2057 spin_lock_irqsave(&il->sta_lock, flags);
2058 if (txq->sched_retry) {
2059 const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
2060 struct il_ht_agg *agg = NULL;
2061 WARN_ON(!qc);
2062
2063 agg = &il->stations[sta_id].tid[tid].agg;
2064
2065 il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
2066
2067 /* check if BAR is needed */
2068 if ((tx_resp->frame_count == 1) &&
2069 !il4965_is_tx_success(status))
2070 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
2071
2072 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2073 idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2074 D_TX_REPLY("Retry scheduler reclaim scd_ssn "
2075 "%d idx %d\n", scd_ssn, idx);
2076 freed = il4965_tx_queue_reclaim(il, txq_id, idx);
2077 if (qc)
2078 il4965_free_tfds_in_queue(il, sta_id, tid,
2079 freed);
2080
2081 if (il->mac80211_registered &&
2082 il_queue_space(&txq->q) > txq->q.low_mark &&
2083 agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
2084 il_wake_queue(il, txq);
2085 }
2086 } else {
2087 info->status.rates[0].count = tx_resp->failure_frame + 1;
2088 info->flags |= il4965_tx_status_to_mac80211(status);
2089 il4965_hwrate_to_tx_control(il,
2090 le32_to_cpu(tx_resp->rate_n_flags),
2091 info);
2092
2093 D_TX_REPLY("TXQ %d status %s (0x%08x) "
2094 "rate_n_flags 0x%x retries %d\n", txq_id,
2095 il4965_get_tx_fail_reason(status), status,
2096 le32_to_cpu(tx_resp->rate_n_flags),
2097 tx_resp->failure_frame);
2098
2099 freed = il4965_tx_queue_reclaim(il, txq_id, idx);
2100 if (qc && likely(sta_id != IL_INVALID_STATION))
2101 il4965_free_tfds_in_queue(il, sta_id, tid, freed);
2102 else if (sta_id == IL_INVALID_STATION)
2103 D_TX_REPLY("Station not known\n");
2104
2105 if (il->mac80211_registered &&
2106 il_queue_space(&txq->q) > txq->q.low_mark)
2107 il_wake_queue(il, txq);
2108 }
2109 if (qc && likely(sta_id != IL_INVALID_STATION))
2110 il4965_txq_check_empty(il, sta_id, tid, txq_id);
2111
2112 il4965_check_abort_status(il, tx_resp->frame_count, status);
2113
2114 spin_unlock_irqrestore(&il->sta_lock, flags);
2115}
2116
2117static void
2118il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
2119{
2120 struct il_rx_pkt *pkt = rxb_addr(rxb);
2121 struct il4965_beacon_notif *beacon = (void *)pkt->u.raw;
2122 u8 rate __maybe_unused =
2123 il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
2124
2125 D_RX("beacon status %#x, retries:%d ibssmgr:%d "
2126 "tsf:0x%.8x%.8x rate:%d\n",
2127 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
2128 beacon->beacon_notify_hdr.failure_frame,
2129 le32_to_cpu(beacon->ibss_mgr_status),
2130 le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
2131
2132 il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
2133}
2134
/* Set up 4965-specific Rx frame reply handlers */
static void
il4965_handler_setup(struct il_priv *il)
{
	/* Legacy Rx frames */
	il->handlers[N_RX] = il4965_hdl_rx;
	/* Tx response */
	il->handlers[C_TX] = il4965_hdl_tx;
	/* Beacon notification (IBSS manager status) */
	il->handlers[N_BEACON] = il4965_hdl_beacon;
}
2145
/* 4965 implementations of the generic host-command operations */
static struct il_hcmd_ops il4965_hcmd = {
	.rxon_assoc = il4965_send_rxon_assoc,
	.commit_rxon = il4965_commit_rxon,
	.set_rxon_chain = il4965_set_rxon_chain,
};
2151
2152static void
2153il4965_post_scan(struct il_priv *il)
2154{
2155 struct il_rxon_context *ctx = &il->ctx;
2156
2157 /*
2158 * Since setting the RXON may have been deferred while
2159 * performing the scan, fire one off if needed
2160 */
2161 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2162 il_commit_rxon(il, ctx);
2163}
2164
/*
 * il4965_post_associate - finish driver-side setup after association
 *
 * Re-commits RXON with the association bit, sends timing, applies HT and
 * preamble/slot settings, then kicks off chain-noise/sensitivity
 * calibration.  Command ordering follows uCode requirements.
 */
static void
il4965_post_associate(struct il_priv *il)
{
	struct il_rxon_context *ctx = &il->ctx;
	struct ieee80211_vif *vif = ctx->vif;
	struct ieee80211_conf *conf = NULL;
	int ret = 0;

	if (!vif || !il->is_open)
		return;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	il_scan_cancel_timeout(il, 200);

	conf = &il->hw->conf;

	/* drop assoc first so the timing command is accepted */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il, ctx);

	ret = il_send_rxon_timing(il, ctx);
	if (ret)
		IL_WARN("RXON timing - " "Attempting to continue.\n");

	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	il_set_rxon_ht(il, &il->current_ht_config);

	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);

	D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
		vif->bss_conf.beacon_int);

	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	/* short slot only applies on the 2.4 GHz band */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	il_commit_rxon(il, ctx);

	D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
		ctx->active.bssid_addr);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		break;
	case NL80211_IFTYPE_ADHOC:
		il4965_send_beacon_cmd(il);
		break;
	default:
		IL_ERR("%s Should not be called in %d mode\n", __func__,
		       vif->type);
		break;
	}

	/* the chain noise calibration will enabled PM upon completion
	 * If chain noise has already been run, then we need to enable
	 * power management here */
	if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE)
		il_power_update_mode(il, false);

	/* Enable Rx differential gain and sensitivity calibrations */
	il4965_chain_noise_reset(il);
	il->start_calib = 1;
}
2241
/*
 * il4965_config_ap - bring up (or refresh) AP-mode operation
 *
 * On first bring-up: commits an unassoc RXON, sends timing, configures
 * HT/preamble/slot flags, sends the beacon template, then re-commits RXON
 * with the assoc bit set.  Always (re)sends the beacon command at the end.
 * Caller must hold il->mutex.
 */
static void
il4965_config_ap(struct il_priv *il)
{
	struct il_rxon_context *ctx = &il->ctx;
	struct ieee80211_vif *vif = ctx->vif;
	int ret = 0;

	lockdep_assert_held(&il->mutex);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* The following should be done only at AP bring up */
	if (!il_is_associated_ctx(ctx)) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		il_commit_rxon(il, ctx);

		/* RXON Timing */
		ret = il_send_rxon_timing(il, ctx);
		if (ret)
			IL_WARN("RXON timing failed - "
				"Attempting to continue.\n");

		/* AP has all antennas */
		il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
		il_set_rxon_ht(il, &il->current_ht_config);
		if (il->cfg->ops->hcmd->set_rxon_chain)
			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

		ctx->staging.assoc_id = 0;

		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* short slot only applies on the 2.4 GHz band */
		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* need to send beacon cmd before committing assoc RXON! */
		il4965_send_beacon_cmd(il);
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		il_commit_rxon(il, ctx);
	}
	il4965_send_beacon_cmd(il);
}
2294
/* 4965-specific helpers for building and sizing host commands and scans */
static struct il_hcmd_utils_ops il4965_hcmd_utils = {
	.get_hcmd_size = il4965_get_hcmd_size,
	.build_addsta_hcmd = il4965_build_addsta_hcmd,
	.request_scan = il4965_request_scan,
	.post_scan = il4965_post_scan,
};
2301
/* Core 4965 library operations: queue handling, firmware load, power,
 * temperature, EEPROM access and (optionally) debugfs statistics. */
static struct il_lib_ops il4965_lib = {
	.set_hw_params = il4965_hw_set_hw_params,
	/* Tx queue / TFD management */
	.txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
	.txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = il4965_hw_txq_free_tfd,
	.txq_init = il4965_hw_tx_queue_init,
	.handler_setup = il4965_handler_setup,
	.is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
	/* firmware bring-up */
	.init_alive_start = il4965_init_alive_start,
	.load_ucode = il4965_load_bsm,
	.dump_nic_error_log = il4965_dump_nic_error_log,
	.dump_fh = il4965_dump_fh,
	.set_channel_switch = il4965_hw_channel_switch,
	.apm_ops = {
		    .init = il_apm_init,
		    .config = il4965_nic_config,
		    },
	.eeprom_ops = {
		       /* per-band channel tables inside the EEPROM image */
		       .regulatory_bands = {
					    EEPROM_REGULATORY_BAND_1_CHANNELS,
					    EEPROM_REGULATORY_BAND_2_CHANNELS,
					    EEPROM_REGULATORY_BAND_3_CHANNELS,
					    EEPROM_REGULATORY_BAND_4_CHANNELS,
					    EEPROM_REGULATORY_BAND_5_CHANNELS,
					    EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
					    EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS},
		       .acquire_semaphore = il4965_eeprom_acquire_semaphore,
		       .release_semaphore = il4965_eeprom_release_semaphore,
		       },
	.send_tx_power = il4965_send_tx_power,
	.update_chain_flags = il4965_update_chain_flags,
	.temp_ops = {
		     .temperature = il4965_temperature_calib,
		     },
#ifdef CONFIG_IWLEGACY_DEBUGFS
	.debugfs_ops = {
			.rx_stats_read = il4965_ucode_rx_stats_read,
			.tx_stats_read = il4965_ucode_tx_stats_read,
			.general_stats_read = il4965_ucode_general_stats_read,
			},
#endif
};
2344
/* Hooks called by the shared iwlegacy core for 4965-specific behavior */
static const struct il_legacy_ops il4965_legacy_ops = {
	.post_associate = il4965_post_associate,
	.config_ap = il4965_config_ap,
	.manage_ibss_station = il4965_manage_ibss_station,
	.update_bcast_stations = il4965_update_bcast_stations,
};
2351
/* mac80211 callbacks for the 4965: a mix of 4965-specific handlers and
 * shared iwlegacy (il_mac_*) implementations */
struct ieee80211_ops il4965_hw_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
};
2373
/* Bundle of all 4965 operation tables handed to the shared core */
static const struct il_ops il4965_ops = {
	.lib = &il4965_lib,
	.hcmd = &il4965_hcmd,
	.utils = &il4965_hcmd_utils,
	.led = &il4965_led_ops,
	.legacy = &il4965_legacy_ops,
	.ieee80211_ops = &il4965_hw_ops,
};
2382
/* Hardware baseline parameters shared by all 4965 variants */
static struct il_base_params il4965_base_params = {
	.eeprom_size = IL4965_EEPROM_IMG_SIZE,
	.num_of_queues = IL49_NUM_QUEUES,
	.num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
	.pll_cfg_val = 0,
	.set_l0s = true,
	/* 4965 loads uCode through the bootstrap state machine */
	.use_bsm = true,
	.led_compensation = 61,
	.chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
	.wd_timeout = IL_DEF_WD_TIMEOUT,
	.temperature_kelvin = true,
	.ucode_tracing = true,
	/* sensitivity/chain-noise calibration is driven by the host */
	.sensitivity_calib_by_driver = true,
	.chain_noise_calib_by_driver = true,
};
2398
/* Device configuration for the Intel 4965AGN adapter */
struct il_cfg il4965_cfg = {
	.name = "Intel(R) Wireless WiFi Link 4965AGN",
	.fw_name_pre = IL4965_FW_PRE,
	.ucode_api_max = IL4965_UCODE_API_MAX,
	.ucode_api_min = IL4965_UCODE_API_MIN,
	.sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
	/* two Tx chains (A, B), three Rx chains (A, B, C) */
	.valid_tx_ant = ANT_AB,
	.valid_rx_ant = ANT_ABC,
	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
	.ops = &il4965_ops,
	.mod_params = &il4965_mod_params,
	.base_params = &il4965_base_params,
	.led_mode = IL_LED_BLINK,
	/*
	 * Force use of chains B and C for scan RX on 5 GHz band
	 * because the device has off-channel reception on chain A.
	 */
	.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};
2419
2420/* Module firmware */
2421MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
new file mode 100644
index 000000000000..74472314bc37
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -0,0 +1,1309 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __il_4965_h__
31#define __il_4965_h__
32
33struct il_rx_queue;
34struct il_rx_buf;
35struct il_rx_pkt;
36struct il_tx_queue;
37struct il_rxon_context;
38
39/* configuration for the _4965 devices */
40extern struct il_cfg il4965_cfg;
41
42extern struct il_mod_params il4965_mod_params;
43
44extern struct ieee80211_ops il4965_hw_ops;
45
46/* tx queue */
47void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
48 int freed);
49
50/* RXON */
51void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx);
52
53/* uCode */
54int il4965_verify_ucode(struct il_priv *il);
55
56/* lib */
57void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status);
58
59void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
60int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
61int il4965_hw_nic_init(struct il_priv *il);
62int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
63
64/* rx */
65void il4965_rx_queue_restock(struct il_priv *il);
66void il4965_rx_replenish(struct il_priv *il);
67void il4965_rx_replenish_now(struct il_priv *il);
68void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
69int il4965_rxq_stop(struct il_priv *il);
70int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
71void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb);
72void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb);
73void il4965_rx_handle(struct il_priv *il);
74
75/* tx */
76void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
77int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
78 dma_addr_t addr, u16 len, u8 reset, u8 pad);
79int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
80void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
81 struct ieee80211_tx_info *info);
82int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
83int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
84 struct ieee80211_sta *sta, u16 tid, u16 * ssn);
85int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
86 struct ieee80211_sta *sta, u16 tid);
87int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
88void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb);
89int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
90void il4965_hw_txq_ctx_free(struct il_priv *il);
91int il4965_txq_ctx_alloc(struct il_priv *il);
92void il4965_txq_ctx_reset(struct il_priv *il);
93void il4965_txq_ctx_stop(struct il_priv *il);
94void il4965_txq_set_sched(struct il_priv *il, u32 mask);
95
96/*
97 * Acquire il->lock before calling this function !
98 */
99void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
100/**
101 * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
102 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
103 * @scd_retry: (1) Indicates queue will be used in aggregation mode
104 *
105 * NOTE: Acquire il->lock before calling this function !
106 */
107void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
108 int tx_fifo_id, int scd_retry);
109
110u8 il4965_toggle_tx_ant(struct il_priv *il, u8 ant_idx, u8 valid);
111
112/* rx */
113void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb);
114bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt);
115void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
116void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
117
118/* scan */
119int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
120
121/* station mgmt */
122int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
123 bool add);
124
125/* hcmd */
126int il4965_send_beacon_cmd(struct il_priv *il);
127
#ifdef CONFIG_IWLEGACY_DEBUG
const char *il4965_get_tx_fail_reason(u32 status);
#else
/*
 * Debug support compiled out: provide an empty-string stub so callers
 * can print the failure reason unconditionally.
 */
static inline const char *
il4965_get_tx_fail_reason(u32 status)
{
	return "";
}
#endif
137
138/* station management */
139int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx);
140int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
141 const u8 *addr, u8 *sta_id_r);
142int il4965_remove_default_wep_key(struct il_priv *il,
143 struct il_rxon_context *ctx,
144 struct ieee80211_key_conf *key);
145int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
146 struct ieee80211_key_conf *key);
147int il4965_restore_default_wep_keys(struct il_priv *il,
148 struct il_rxon_context *ctx);
149int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
150 struct ieee80211_key_conf *key, u8 sta_id);
151int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
152 struct ieee80211_key_conf *key, u8 sta_id);
153void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
154 struct ieee80211_key_conf *keyconf,
155 struct ieee80211_sta *sta, u32 iv32,
156 u16 *phase1key);
157int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid);
158int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
159 int tid, u16 ssn);
160int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
161 int tid);
162void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt);
163int il4965_update_bcast_stations(struct il_priv *il);
164
165/* rate */
166static inline u8
167il4965_hw_get_rate(__le32 rate_n_flags)
168{
169 return le32_to_cpu(rate_n_flags) & 0xFF;
170}
171
172static inline __le32
173il4965_hw_set_rate_n_flags(u8 rate, u32 flags)
174{
175 return cpu_to_le32(flags | (u32) rate);
176}
177
178/* eeprom */
179void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac);
180int il4965_eeprom_acquire_semaphore(struct il_priv *il);
181void il4965_eeprom_release_semaphore(struct il_priv *il);
182int il4965_eeprom_check_version(struct il_priv *il);
183
184/* mac80211 handlers (for 4965) */
185void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
186int il4965_mac_start(struct ieee80211_hw *hw);
187void il4965_mac_stop(struct ieee80211_hw *hw);
188void il4965_configure_filter(struct ieee80211_hw *hw,
189 unsigned int changed_flags,
190 unsigned int *total_flags, u64 multicast);
191int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
192 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
193 struct ieee80211_key_conf *key);
194void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
195 struct ieee80211_vif *vif,
196 struct ieee80211_key_conf *keyconf,
197 struct ieee80211_sta *sta, u32 iv32,
198 u16 *phase1key);
199int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
200 enum ieee80211_ampdu_mlme_action action,
201 struct ieee80211_sta *sta, u16 tid, u16 * ssn,
202 u8 buf_size);
203int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
204 struct ieee80211_sta *sta);
205void il4965_mac_channel_switch(struct ieee80211_hw *hw,
206 struct ieee80211_channel_switch *ch_switch);
207
208void il4965_led_enable(struct il_priv *il);
209
210/* EEPROM */
211#define IL4965_EEPROM_IMG_SIZE 1024
212
213/*
214 * uCode queue management definitions ...
215 * The first queue used for block-ack aggregation is #7 (4965 only).
216 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
217 */
218#define IL49_FIRST_AMPDU_QUEUE 7
219
220/* Sizes and addresses for instruction and data memory (SRAM) in
221 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
222#define IL49_RTC_INST_LOWER_BOUND (0x000000)
223#define IL49_RTC_INST_UPPER_BOUND (0x018000)
224
225#define IL49_RTC_DATA_LOWER_BOUND (0x800000)
226#define IL49_RTC_DATA_UPPER_BOUND (0x80A000)
227
228#define IL49_RTC_INST_SIZE (IL49_RTC_INST_UPPER_BOUND - \
229 IL49_RTC_INST_LOWER_BOUND)
230#define IL49_RTC_DATA_SIZE (IL49_RTC_DATA_UPPER_BOUND - \
231 IL49_RTC_DATA_LOWER_BOUND)
232
233#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE
234#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE
235
236/* Size of uCode instruction memory in bootstrap state machine */
237#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE
238
239static inline int
240il4965_hw_valid_rtc_data_addr(u32 addr)
241{
242 return (addr >= IL49_RTC_DATA_LOWER_BOUND &&
243 addr < IL49_RTC_DATA_UPPER_BOUND);
244}
245
246/********************* START TEMPERATURE *************************************/
247
248/**
249 * 4965 temperature calculation.
250 *
251 * The driver must calculate the device temperature before calculating
252 * a txpower setting (amplifier gain is temperature dependent). The
253 * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
254 * values used for the life of the driver, and one of which (R4) is the
255 * real-time temperature indicator.
256 *
257 * uCode provides all 4 values to the driver via the "initialize alive"
258 * notification (see struct il4965_init_alive_resp). After the runtime uCode
259 * image loads, uCode updates the R4 value via stats notifications
260 * (see N_STATS), which occur after each received beacon
261 * when associated, or can be requested via C_STATS.
262 *
263 * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
264 * must sign-extend to 32 bits before applying formula below.
265 *
266 * Formula:
267 *
268 * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
269 *
270 * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
271 * an additional correction, which should be centered around 0 degrees
272 * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
273 * centering the 97/100 correction around 0 degrees K.
274 *
275 * Add 273 to Kelvin value to find degrees Celsius, for comparing current
276 * temperature with factory-measured temperatures when calculating txpower
277 * settings.
278 */
279#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
280#define TEMPERATURE_CALIB_A_VAL 259
281
282/* Limit range of calculated temperature to be between these Kelvin values */
283#define IL_TX_POWER_TEMPERATURE_MIN (263)
284#define IL_TX_POWER_TEMPERATURE_MAX (410)
285
286#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
287 ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
288 (t) > IL_TX_POWER_TEMPERATURE_MAX)
289
290/********************* END TEMPERATURE ***************************************/
291
292/********************* START TXPOWER *****************************************/
293
294/**
295 * 4965 txpower calculations rely on information from three sources:
296 *
297 * 1) EEPROM
298 * 2) "initialize" alive notification
299 * 3) stats notifications
300 *
301 * EEPROM data consists of:
302 *
303 * 1) Regulatory information (max txpower and channel usage flags) is provided
 304 * separately for each channel that can possibly be supported by 4965.
305 * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
306 * (legacy) channels.
307 *
308 * See struct il4965_eeprom_channel for format, and struct il4965_eeprom
309 * for locations in EEPROM.
310 *
311 * 2) Factory txpower calibration information is provided separately for
312 * sub-bands of contiguous channels. 2.4GHz has just one sub-band,
313 * but 5 GHz has several sub-bands.
314 *
 315 * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
316 *
317 * See struct il4965_eeprom_calib_info (and the tree of structures
318 * contained within it) for format, and struct il4965_eeprom for
319 * locations in EEPROM.
320 *
321 * "Initialization alive" notification (see struct il4965_init_alive_resp)
322 * consists of:
323 *
324 * 1) Temperature calculation parameters.
325 *
326 * 2) Power supply voltage measurement.
327 *
328 * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
329 *
330 * Statistics notifications deliver:
331 *
332 * 1) Current values for temperature param R4.
333 */
334
335/**
336 * To calculate a txpower setting for a given desired target txpower, channel,
337 * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
338 * support MIMO and transmit diversity), driver must do the following:
339 *
340 * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
341 * Do not exceed regulatory limit; reduce target txpower if necessary.
342 *
343 * If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
344 * 2 transmitters will be used simultaneously; driver must reduce the
345 * regulatory limit by 3 dB (half-power) for each transmitter, so the
346 * combined total output of the 2 transmitters is within regulatory limits.
347 *
348 *
349 * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
350 * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
351 * reduce target txpower if necessary.
352 *
353 * Backoff values below are in 1/2 dB units (equivalent to steps in
354 * txpower gain tables):
355 *
356 * OFDM 6 - 36 MBit: 10 steps (5 dB)
357 * OFDM 48 MBit: 15 steps (7.5 dB)
358 * OFDM 54 MBit: 17 steps (8.5 dB)
359 * OFDM 60 MBit: 20 steps (10 dB)
360 * CCK all rates: 10 steps (5 dB)
361 *
362 * Backoff values apply to saturation txpower on a per-transmitter basis;
363 * when using MIMO (2 transmitters), each transmitter uses the same
364 * saturation level provided in EEPROM, and the same backoff values;
365 * no reduction (such as with regulatory txpower limits) is required.
366 *
367 * Saturation and Backoff values apply equally to 20 Mhz (legacy) channel
368 * widths and 40 Mhz (.11n HT40) channel widths; there is no separate
369 * factory measurement for ht40 channels.
370 *
371 * The result of this step is the final target txpower. The rest of
372 * the steps figure out the proper settings for the device to achieve
373 * that target txpower.
374 *
375 *
376 * 3) Determine (EEPROM) calibration sub band for the target channel, by
377 * comparing against first and last channels in each sub band
378 * (see struct il4965_eeprom_calib_subband_info).
379 *
380 *
381 * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
382 * referencing the 2 factory-measured (sample) channels within the sub band.
383 *
384 * Interpolation is based on difference between target channel's frequency
385 * and the sample channels' frequencies. Since channel numbers are based
386 * on frequency (5 MHz between each channel number), this is equivalent
387 * to interpolating based on channel number differences.
388 *
389 * Note that the sample channels may or may not be the channels at the
390 * edges of the sub band. The target channel may be "outside" of the
391 * span of the sampled channels.
392 *
393 * Driver may choose the pair (for 2 Tx chains) of measurements (see
394 * struct il4965_eeprom_calib_ch_info) for which the actual measured
395 * txpower comes closest to the desired txpower. Usually, though,
396 * the middle set of measurements is closest to the regulatory limits,
397 * and is therefore a good choice for all txpower calculations (this
398 * assumes that high accuracy is needed for maximizing legal txpower,
399 * while lower txpower configurations do not need as much accuracy).
400 *
401 * Driver should interpolate both members of the chosen measurement pair,
402 * i.e. for both Tx chains (radio transmitters), unless the driver knows
403 * that only one of the chains will be used (e.g. only one tx antenna
404 * connected, but this should be unusual). The rate scaling algorithm
405 * switches antennas to find best performance, so both Tx chains will
406 * be used (although only one at a time) even for non-MIMO transmissions.
407 *
408 * Driver should interpolate factory values for temperature, gain table
409 * idx, and actual power. The power amplifier detector values are
410 * not used by the driver.
411 *
412 * Sanity check: If the target channel happens to be one of the sample
413 * channels, the results should agree with the sample channel's
414 * measurements!
415 *
416 *
417 * 5) Find difference between desired txpower and (interpolated)
418 * factory-measured txpower. Using (interpolated) factory gain table idx
419 * (shown elsewhere) as a starting point, adjust this idx lower to
420 * increase txpower, or higher to decrease txpower, until the target
421 * txpower is reached. Each step in the gain table is 1/2 dB.
422 *
423 * For example, if factory measured txpower is 16 dBm, and target txpower
424 * is 13 dBm, add 6 steps to the factory gain idx to reduce txpower
425 * by 3 dB.
426 *
427 *
428 * 6) Find difference between current device temperature and (interpolated)
429 * factory-measured temperature for sub-band. Factory values are in
430 * degrees Celsius. To calculate current temperature, see comments for
431 * "4965 temperature calculation".
432 *
433 * If current temperature is higher than factory temperature, driver must
 434 * increase gain (lower gain table idx), and vice versa.
435 *
436 * Temperature affects gain differently for different channels:
437 *
438 * 2.4 GHz all channels: 3.5 degrees per half-dB step
439 * 5 GHz channels 34-43: 4.5 degrees per half-dB step
440 * 5 GHz channels >= 44: 4.0 degrees per half-dB step
441 *
442 * NOTE: Temperature can increase rapidly when transmitting, especially
443 * with heavy traffic at high txpowers. Driver should update
444 * temperature calculations often under these conditions to
445 * maintain strong txpower in the face of rising temperature.
446 *
447 *
448 * 7) Find difference between current power supply voltage indicator
449 * (from "initialize alive") and factory-measured power supply voltage
450 * indicator (EEPROM).
451 *
452 * If the current voltage is higher (indicator is lower) than factory
453 * voltage, gain should be reduced (gain table idx increased) by:
454 *
455 * (eeprom - current) / 7
456 *
457 * If the current voltage is lower (indicator is higher) than factory
458 * voltage, gain should be increased (gain table idx decreased) by:
459 *
460 * 2 * (current - eeprom) / 7
461 *
462 * If number of idx steps in either direction turns out to be > 2,
463 * something is wrong ... just use 0.
464 *
465 * NOTE: Voltage compensation is independent of band/channel.
466 *
467 * NOTE: "Initialize" uCode measures current voltage, which is assumed
468 * to be constant after this initial measurement. Voltage
469 * compensation for txpower (number of steps in gain table)
470 * may be calculated once and used until the next uCode bootload.
471 *
472 *
473 * 8) If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
474 * adjust txpower for each transmitter chain, so txpower is balanced
475 * between the two chains. There are 5 pairs of tx_atten[group][chain]
476 * values in "initialize alive", one pair for each of 5 channel ranges:
477 *
478 * Group 0: 5 GHz channel 34-43
479 * Group 1: 5 GHz channel 44-70
480 * Group 2: 5 GHz channel 71-124
481 * Group 3: 5 GHz channel 125-200
482 * Group 4: 2.4 GHz all channels
483 *
484 * Add the tx_atten[group][chain] value to the idx for the target chain.
485 * The values are signed, but are in pairs of 0 and a non-negative number,
486 * so as to reduce gain (if necessary) of the "hotter" channel. This
487 * avoids any need to double-check for regulatory compliance after
488 * this step.
489 *
490 *
491 * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
492 * value to the idx:
493 *
494 * Hardware rev B: 9 steps (4.5 dB)
495 * Hardware rev C: 5 steps (2.5 dB)
496 *
497 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
498 * bits [3:2], 1 = B, 2 = C.
499 *
500 * NOTE: This compensation is in addition to any saturation backoff that
501 * might have been applied in an earlier step.
502 *
503 *
504 * 10) Select the gain table, based on band (2.4 vs 5 GHz).
505 *
506 * Limit the adjusted idx to stay within the table!
507 *
508 *
509 * 11) Read gain table entries for DSP and radio gain, place into appropriate
510 * location(s) in command (struct il4965_txpowertable_cmd).
511 */
512
513/**
514 * When MIMO is used (2 transmitters operating simultaneously), driver should
515 * limit each transmitter to deliver a max of 3 dB below the regulatory limit
516 * for the device. That is, use half power for each transmitter, so total
517 * txpower is within regulatory limits.
518 *
519 * The value "6" represents number of steps in gain table to reduce power 3 dB.
520 * Each step is 1/2 dB.
521 */
522#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
523
524/**
525 * CCK gain compensation.
526 *
527 * When calculating txpowers for CCK, after making sure that the target power
528 * is within regulatory and saturation limits, driver must additionally
529 * back off gain by adding these values to the gain table idx.
530 *
531 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
532 * bits [3:2], 1 = B, 2 = C.
533 */
534#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
535#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
536
537/*
538 * 4965 power supply voltage compensation for txpower
539 */
540#define TX_POWER_IL_VOLTAGE_CODES_PER_03V (7)
541
542/**
543 * Gain tables.
544 *
545 * The following tables contain pair of values for setting txpower, i.e.
546 * gain settings for the output of the device's digital signal processor (DSP),
547 * and for the analog gain structure of the transmitter.
548 *
549 * Each entry in the gain tables represents a step of 1/2 dB. Note that these
550 * are *relative* steps, not indications of absolute output power. Output
551 * power varies with temperature, voltage, and channel frequency, and also
552 * requires consideration of average power (to satisfy regulatory constraints),
553 * and peak power (to avoid distortion of the output signal).
554 *
555 * Each entry contains two values:
556 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
557 * linear value that multiplies the output of the digital signal processor,
558 * before being sent to the analog radio.
559 * 2) Radio gain. This sets the analog gain of the radio Tx path.
560 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
561 *
562 * EEPROM contains factory calibration data for txpower. This maps actual
563 * measured txpower levels to gain settings in the "well known" tables
564 * below ("well-known" means here that both factory calibration *and* the
565 * driver work with the same table).
566 *
567 * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
568 * has an extension (into negative idxes), in case the driver needs to
569 * boost power setting for high device temperatures (higher than would be
570 * present during factory calibration). A 5 Ghz EEPROM idx of "40"
571 * corresponds to the 49th entry in the table used by the driver.
572 */
573#define MIN_TX_GAIN_IDX (0) /* highest gain, lowest idx, 2.4 */
574#define MIN_TX_GAIN_IDX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
575
576/**
577 * 2.4 GHz gain table
578 *
579 * Index Dsp gain Radio gain
580 * 0 110 0x3f (highest gain)
581 * 1 104 0x3f
582 * 2 98 0x3f
583 * 3 110 0x3e
584 * 4 104 0x3e
585 * 5 98 0x3e
586 * 6 110 0x3d
587 * 7 104 0x3d
588 * 8 98 0x3d
589 * 9 110 0x3c
590 * 10 104 0x3c
591 * 11 98 0x3c
592 * 12 110 0x3b
593 * 13 104 0x3b
594 * 14 98 0x3b
595 * 15 110 0x3a
596 * 16 104 0x3a
597 * 17 98 0x3a
598 * 18 110 0x39
599 * 19 104 0x39
600 * 20 98 0x39
601 * 21 110 0x38
602 * 22 104 0x38
603 * 23 98 0x38
604 * 24 110 0x37
605 * 25 104 0x37
606 * 26 98 0x37
607 * 27 110 0x36
608 * 28 104 0x36
609 * 29 98 0x36
610 * 30 110 0x35
611 * 31 104 0x35
612 * 32 98 0x35
613 * 33 110 0x34
614 * 34 104 0x34
615 * 35 98 0x34
616 * 36 110 0x33
617 * 37 104 0x33
618 * 38 98 0x33
619 * 39 110 0x32
620 * 40 104 0x32
621 * 41 98 0x32
622 * 42 110 0x31
623 * 43 104 0x31
624 * 44 98 0x31
625 * 45 110 0x30
626 * 46 104 0x30
627 * 47 98 0x30
628 * 48 110 0x6
629 * 49 104 0x6
630 * 50 98 0x6
631 * 51 110 0x5
632 * 52 104 0x5
633 * 53 98 0x5
634 * 54 110 0x4
635 * 55 104 0x4
636 * 56 98 0x4
637 * 57 110 0x3
638 * 58 104 0x3
639 * 59 98 0x3
640 * 60 110 0x2
641 * 61 104 0x2
642 * 62 98 0x2
643 * 63 110 0x1
644 * 64 104 0x1
645 * 65 98 0x1
646 * 66 110 0x0
647 * 67 104 0x0
648 * 68 98 0x0
649 * 69 97 0
650 * 70 96 0
651 * 71 95 0
652 * 72 94 0
653 * 73 93 0
654 * 74 92 0
655 * 75 91 0
656 * 76 90 0
657 * 77 89 0
658 * 78 88 0
659 * 79 87 0
660 * 80 86 0
661 * 81 85 0
662 * 82 84 0
663 * 83 83 0
664 * 84 82 0
665 * 85 81 0
666 * 86 80 0
667 * 87 79 0
668 * 88 78 0
669 * 89 77 0
670 * 90 76 0
671 * 91 75 0
672 * 92 74 0
673 * 93 73 0
674 * 94 72 0
675 * 95 71 0
676 * 96 70 0
677 * 97 69 0
678 * 98 68 0
679 */
680
681/**
682 * 5 GHz gain table
683 *
684 * Index Dsp gain Radio gain
685 * -9 123 0x3F (highest gain)
686 * -8 117 0x3F
687 * -7 110 0x3F
688 * -6 104 0x3F
689 * -5 98 0x3F
690 * -4 110 0x3E
691 * -3 104 0x3E
692 * -2 98 0x3E
693 * -1 110 0x3D
694 * 0 104 0x3D
695 * 1 98 0x3D
696 * 2 110 0x3C
697 * 3 104 0x3C
698 * 4 98 0x3C
699 * 5 110 0x3B
700 * 6 104 0x3B
701 * 7 98 0x3B
702 * 8 110 0x3A
703 * 9 104 0x3A
704 * 10 98 0x3A
705 * 11 110 0x39
706 * 12 104 0x39
707 * 13 98 0x39
708 * 14 110 0x38
709 * 15 104 0x38
710 * 16 98 0x38
711 * 17 110 0x37
712 * 18 104 0x37
713 * 19 98 0x37
714 * 20 110 0x36
715 * 21 104 0x36
716 * 22 98 0x36
717 * 23 110 0x35
718 * 24 104 0x35
719 * 25 98 0x35
720 * 26 110 0x34
721 * 27 104 0x34
722 * 28 98 0x34
723 * 29 110 0x33
724 * 30 104 0x33
725 * 31 98 0x33
726 * 32 110 0x32
727 * 33 104 0x32
728 * 34 98 0x32
729 * 35 110 0x31
730 * 36 104 0x31
731 * 37 98 0x31
732 * 38 110 0x30
733 * 39 104 0x30
734 * 40 98 0x30
735 * 41 110 0x25
736 * 42 104 0x25
737 * 43 98 0x25
738 * 44 110 0x24
739 * 45 104 0x24
740 * 46 98 0x24
741 * 47 110 0x23
742 * 48 104 0x23
743 * 49 98 0x23
744 * 50 110 0x22
745 * 51 104 0x18
746 * 52 98 0x18
747 * 53 110 0x17
748 * 54 104 0x17
749 * 55 98 0x17
750 * 56 110 0x16
751 * 57 104 0x16
752 * 58 98 0x16
753 * 59 110 0x15
754 * 60 104 0x15
755 * 61 98 0x15
756 * 62 110 0x14
757 * 63 104 0x14
758 * 64 98 0x14
759 * 65 110 0x13
760 * 66 104 0x13
761 * 67 98 0x13
762 * 68 110 0x12
763 * 69 104 0x08
764 * 70 98 0x08
765 * 71 110 0x07
766 * 72 104 0x07
767 * 73 98 0x07
768 * 74 110 0x06
769 * 75 104 0x06
770 * 76 98 0x06
771 * 77 110 0x05
772 * 78 104 0x05
773 * 79 98 0x05
774 * 80 110 0x04
775 * 81 104 0x04
776 * 82 98 0x04
777 * 83 110 0x03
778 * 84 104 0x03
779 * 85 98 0x03
780 * 86 110 0x02
781 * 87 104 0x02
782 * 88 98 0x02
783 * 89 110 0x01
784 * 90 104 0x01
785 * 91 98 0x01
786 * 92 110 0x00
787 * 93 104 0x00
788 * 94 98 0x00
789 * 95 93 0x00
790 * 96 88 0x00
791 * 97 83 0x00
792 * 98 78 0x00
793 */
794
795/**
796 * Sanity checks and default values for EEPROM regulatory levels.
797 * If EEPROM values fall outside MIN/MAX range, use default values.
798 *
799 * Regulatory limits refer to the maximum average txpower allowed by
800 * regulatory agencies in the geographies in which the device is meant
801 * to be operated. These limits are SKU-specific (i.e. geography-specific),
802 * and channel-specific; each channel has an individual regulatory limit
803 * listed in the EEPROM.
804 *
805 * Units are in half-dBm (i.e. "34" means 17 dBm).
806 */
807#define IL_TX_POWER_DEFAULT_REGULATORY_24 (34)
808#define IL_TX_POWER_DEFAULT_REGULATORY_52 (34)
809#define IL_TX_POWER_REGULATORY_MIN (0)
810#define IL_TX_POWER_REGULATORY_MAX (34)
811
812/**
813 * Sanity checks and default values for EEPROM saturation levels.
814 * If EEPROM values fall outside MIN/MAX range, use default values.
815 *
816 * Saturation is the highest level that the output power amplifier can produce
817 * without significant clipping distortion. This is a "peak" power level.
818 * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
819 * require differing amounts of backoff, relative to their average power output,
820 * in order to avoid clipping distortion.
821 *
822 * Driver must make sure that it is violating neither the saturation limit,
823 * nor the regulatory limit, when calculating Tx power settings for various
824 * rates.
825 *
826 * Units are in half-dBm (i.e. "38" means 19 dBm).
827 */
828#define IL_TX_POWER_DEFAULT_SATURATION_24 (38)
829#define IL_TX_POWER_DEFAULT_SATURATION_52 (38)
830#define IL_TX_POWER_SATURATION_MIN (20)
831#define IL_TX_POWER_SATURATION_MAX (50)
832
833/**
834 * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
835 * and thermal Txpower calibration.
836 *
837 * When calculating txpower, driver must compensate for current device
838 * temperature; higher temperature requires higher gain. Driver must calculate
839 * current temperature (see "4965 temperature calculation"), then compare vs.
840 * factory calibration temperature in EEPROM; if current temperature is higher
841 * than factory temperature, driver must *increase* gain by proportions shown
842 * in table below. If current temperature is lower than factory, driver must
843 * *decrease* gain.
844 *
845 * Different frequency ranges require different compensation, as shown below.
846 */
847/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
848#define CALIB_IL_TX_ATTEN_GR1_FCH 34
849#define CALIB_IL_TX_ATTEN_GR1_LCH 43
850
851/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
852#define CALIB_IL_TX_ATTEN_GR2_FCH 44
853#define CALIB_IL_TX_ATTEN_GR2_LCH 70
854
855/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
856#define CALIB_IL_TX_ATTEN_GR3_FCH 71
857#define CALIB_IL_TX_ATTEN_GR3_LCH 124
858
859/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
860#define CALIB_IL_TX_ATTEN_GR4_FCH 125
861#define CALIB_IL_TX_ATTEN_GR4_LCH 200
862
863/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
864#define CALIB_IL_TX_ATTEN_GR5_FCH 1
865#define CALIB_IL_TX_ATTEN_GR5_LCH 20
866
/*
 * Indexes for the per-group Tx attenuation / thermal txpower calibration
 * tables: groups 1-4 map to the 5 GHz sub-bands defined by the
 * CALIB_IL_TX_ATTEN_GR[1-4]_{FCH,LCH} ranges above, group 5 covers all
 * 2.4 GHz channels.
 */
enum {
	CALIB_CH_GROUP_1 = 0,
	CALIB_CH_GROUP_2 = 1,
	CALIB_CH_GROUP_3 = 2,
	CALIB_CH_GROUP_4 = 3,
	CALIB_CH_GROUP_5 = 4,
	CALIB_CH_GROUP_MAX
};
875
876/********************* END TXPOWER *****************************************/
877
878/**
879 * Tx/Rx Queues
880 *
881 * Most communication between driver and 4965 is via queues of data buffers.
882 * For example, all commands that the driver issues to device's embedded
883 * controller (uCode) are via the command queue (one of the Tx queues). All
884 * uCode command responses/replies/notifications, including Rx frames, are
885 * conveyed from uCode to driver via the Rx queue.
886 *
887 * Most support for these queues, including handshake support, resides in
888 * structures in host DRAM, shared between the driver and the device. When
889 * allocating this memory, the driver must make sure that data written by
890 * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
891 * cache memory), so DRAM and cache are consistent, and the device can
892 * immediately see changes made by the driver.
893 *
894 * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
895 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
896 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
897 */
898#define IL49_NUM_FIFOS 7
899#define IL49_CMD_FIFO_NUM 4
900#define IL49_NUM_QUEUES 16
901#define IL49_NUM_AMPDU_QUEUES 8
902
903/**
904 * struct il4965_schedq_bc_tbl
905 *
906 * Byte Count table
907 *
908 * Each Tx queue uses a byte-count table containing 320 entries:
909 * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
910 * duplicate the first 64 entries (to avoid wrap-around within a Tx win;
911 * max Tx win is 64 TFDs).
912 *
913 * When driver sets up a new TFD, it must also enter the total byte count
914 * of the frame to be transmitted into the corresponding entry in the byte
915 * count table for the chosen Tx queue. If the TFD idx is 0-63, the driver
916 * must duplicate the byte count entry in corresponding idx 256-319.
917 *
918 * padding puts each byte count table on a 1024-byte boundary;
919 * 4965 assumes tables are separated by 1024 bytes.
920 */
921struct il4965_scd_bc_tbl {
922	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];	/* byte count per TFD; idx 0-63 duplicated at 256-319 */
923	u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];	/* pad to the 1024-byte boundary 4965 expects */
924} __packed;
925
926#define IL4965_RTC_INST_LOWER_BOUND (0x000000)
927
928/* RSSI to dBm */
929#define IL4965_RSSI_OFFSET 44
930
931/* PCI registers */
932#define PCI_CFG_RETRY_TIMEOUT 0x041
933
934/* PCI register values */
935#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
936#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
937
938#define IL4965_DEFAULT_TX_RETRY 15
939
940/* EEPROM */
941#define IL4965_FIRST_AMPDU_QUEUE 10
942
943/* Calibration */
944void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
945void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
946void il4965_init_sensitivity(struct il_priv *il);
947void il4965_reset_run_time_calib(struct il_priv *il);
948void il4965_calib_free_results(struct il_priv *il);
949
950/* Debug */
951#ifdef CONFIG_IWLEGACY_DEBUGFS
952ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
953 size_t count, loff_t *ppos);
954ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
955 size_t count, loff_t *ppos);
956ssize_t il4965_ucode_general_stats_read(struct file *file,
957 char __user *user_buf, size_t count,
958 loff_t *ppos);
959#endif
960
961/****************************/
962/* Flow Handler Definitions */
963/****************************/
964
965/**
966 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
967 * Addresses are offsets from device's PCI hardware base address.
968 */
969#define FH49_MEM_LOWER_BOUND (0x1000)
970#define FH49_MEM_UPPER_BOUND (0x2000)
971
972/**
973 * Keep-Warm (KW) buffer base address.
974 *
975 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
976 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
977 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
978 * from going into a power-savings mode that would cause higher DRAM latency,
979 * and possible data over/under-runs, before all Tx/Rx is complete.
980 *
981 * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4)
982 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
983 * automatically invokes keep-warm accesses when normal accesses might not
984 * be sufficient to maintain fast DRAM response.
985 *
986 * Bit fields:
987 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
988 */
989#define FH49_KW_MEM_ADDR_REG (FH49_MEM_LOWER_BOUND + 0x97C)
990
991/**
992 * TFD Circular Buffers Base (CBBC) addresses
993 *
994 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
995 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
996 * (see struct il_tfd_frame). These 16 pointer registers are offset by 0x04
997 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
998 * aligned (address bits 0-7 must be 0).
999 *
1000 * Bit fields in each pointer register:
1001 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
1002 */
1003#define FH49_MEM_CBBC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
1004#define FH49_MEM_CBBC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xA10)
1005
1006/* Find TFD CB base pointer for given queue (range 0-15). */
1007#define FH49_MEM_CBBC_QUEUE(x) (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
1008
1009/**
1010 * Rx SRAM Control and Status Registers (RSCSR)
1011 *
1012 * These registers provide handshake between driver and 4965 for the Rx queue
1013 * (this queue handles *all* command responses, notifications, Rx data, etc.
1014 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
1015 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
1016 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
1017 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
1018 * mapping between RBDs and RBs.
1019 *
1020 * Driver must allocate host DRAM memory for the following, and set the
1021 * physical address of each into 4965 registers:
1022 *
1023 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
1024 * entries (although any power of 2, up to 4096, is selectable by driver).
1025 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
1026 * (typically 4K, although 8K or 16K are also selectable by driver).
1027 * Driver sets up RB size and number of RBDs in the CB via Rx config
1028 * register FH49_MEM_RCSR_CHNL0_CONFIG_REG.
1029 *
1030 * Bit fields within one RBD:
1031 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
1032 *
1033 * Driver sets physical address [35:8] of base of RBD circular buffer
1034 * into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
1035 *
1036 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
1037 * (RBs) have been filled, via a "write pointer", actually the idx of
1038 * the RB's corresponding RBD within the circular buffer. Driver sets
1039 * physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
1040 *
1041 * Bit fields in lower dword of Rx status buffer (upper dword not used
1042 * by driver; see struct il4965_shared, val0):
1043 * 31-12: Not used by driver
1044 * 11- 0: Index of last filled Rx buffer descriptor
1045 * (4965 writes, driver reads this value)
1046 *
1047 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
1048 * enter pointers to these RBs into contiguous RBD circular buffer entries,
1049 * and update the 4965's "write" idx register,
1050 * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG.
1051 *
1052 * This "write" idx corresponds to the *next* RBD that the driver will make
1053 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
1054 * the circular buffer. This value should initially be 0 (before preparing any
1055 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
1056 * wrap back to 0 at the end of the circular buffer (but don't wrap before
1057 * "read" idx has advanced past 1! See below).
1058 * NOTE: 4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8.
1059 *
1060 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
1061 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
1062 * to tell the driver the idx of the latest filled RBD. The driver must
1063 * read this "read" idx from DRAM after receiving an Rx interrupt from 4965.
1064 *
1065 * The driver must also internally keep track of a third idx, which is the
1066 * next RBD to process. When receiving an Rx interrupt, driver should process
1067 * all filled but unprocessed RBs up to, but not including, the RB
1068 * corresponding to the "read" idx. For example, if "read" idx becomes "1",
1069 * driver may process the RB pointed to by RBD 0. Depending on volume of
1070 * traffic, there may be many RBs to process.
1071 *
1072 * If read idx == write idx, 4965 thinks there is no room to put new data.
1073 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
1074 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
1075 * and "read" idxes; that is, make sure that there are no more than 254
1076 * buffers waiting to be filled.
1077 */
1078#define FH49_MEM_RSCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xBC0)
1079#define FH49_MEM_RSCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
1080#define FH49_MEM_RSCSR_CHNL0 (FH49_MEM_RSCSR_LOWER_BOUND)
1081
1082/**
1083 * Physical base address of 8-byte Rx Status buffer.
1084 * Bit fields:
1085 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
1086 */
1087#define FH49_RSCSR_CHNL0_STTS_WPTR_REG (FH49_MEM_RSCSR_CHNL0)
1088
1089/**
1090 * Physical base address of Rx Buffer Descriptor Circular Buffer.
1091 * Bit fields:
1092 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
1093 */
1094#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG (FH49_MEM_RSCSR_CHNL0 + 0x004)
1095
1096/**
1097 * Rx write pointer (idx, really!).
1098 * Bit fields:
1099 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
1100 * NOTE: For 256-entry circular buffer, use only bits [7:0].
1101 */
1102#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG (FH49_MEM_RSCSR_CHNL0 + 0x008)
1103#define FH49_RSCSR_CHNL0_WPTR (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG)
1104
1105/**
1106 * Rx Config/Status Registers (RCSR)
1107 * Rx Config Reg for channel 0 (only channel used)
1108 *
1109 * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for
1110 * normal operation (see bit fields).
1111 *
1112 * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
1113 * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for
1114 * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
1115 *
1116 * Bit fields:
1117 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
1118 * '10' operate normally
1119 * 29-24: reserved
1120 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
1121 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
1122 * 19-18: reserved
1123 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
1124 * '10' 12K, '11' 16K.
1125 * 15-14: reserved
1126 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
1127 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
1128 * typical value 0x10 (about 1/2 msec)
1129 * 3- 0: reserved
1130 */
1131#define FH49_MEM_RCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
1132#define FH49_MEM_RCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xCC0)
1133#define FH49_MEM_RCSR_CHNL0 (FH49_MEM_RCSR_LOWER_BOUND)
1134
1135#define FH49_MEM_RCSR_CHNL0_CONFIG_REG (FH49_MEM_RCSR_CHNL0)
1136
1137#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
1138#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
1139#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
1140#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
1141#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
1142#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */
1143
1144#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
1145#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
1146#define RX_RB_TIMEOUT (0x10)
1147
1148#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
1149#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
1150#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
1151
1152#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
1153#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
1154#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
1155#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
1156
1157#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
1158#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
1159#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
1160
1161/**
1162 * Rx Shared Status Registers (RSSR)
1163 *
1164 * After stopping Rx DMA channel (writing 0 to
1165 * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
1166 * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
1167 *
1168 * Bit fields:
1169 * 24: 1 = Channel 0 is idle
1170 *
1171 * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
1172 * contain default values that should not be altered by the driver.
1173 */
1174#define FH49_MEM_RSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC40)
1175#define FH49_MEM_RSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
1176
1177#define FH49_MEM_RSSR_SHARED_CTRL_REG (FH49_MEM_RSSR_LOWER_BOUND)
1178#define FH49_MEM_RSSR_RX_STATUS_REG (FH49_MEM_RSSR_LOWER_BOUND + 0x004)
1179#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
1180 (FH49_MEM_RSSR_LOWER_BOUND + 0x008)
1181
1182#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
1183
1184#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
1185
1186/* TFDB Area - TFDs buffer table */
1187#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
1188#define FH49_TFDIB_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x900)
1189#define FH49_TFDIB_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x958)
1190#define FH49_TFDIB_CTRL0_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
1191#define FH49_TFDIB_CTRL1_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
1192
1193/**
1194 * Transmit DMA Channel Control/Status Registers (TCSR)
1195 *
1196 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
1197 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
1198 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
1199 *
1200 * To use a Tx DMA channel, driver must initialize its
1201 * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
1202 *
1203 * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1204 * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
1205 *
1206 * All other bits should be 0.
1207 *
1208 * Bit fields:
1209 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
1210 * '10' operate normally
1211 * 29- 4: Reserved, set to "0"
1212 * 3: Enable internal DMA requests (1, normal operation), disable (0)
1213 * 2- 0: Reserved, set to "0"
1214 */
1215#define FH49_TCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
1216#define FH49_TCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xE60)
1217
1218/* Find Control/Status reg for given Tx DMA/FIFO channel */
1219#define FH49_TCSR_CHNL_NUM (7)
1220#define FH50_TCSR_CHNL_NUM (8)
1221
1222/* TCSR: tx_config register values */
1223#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
1224 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl))
1225#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
1226 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
1227#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
1228 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
1229
1230#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
1231#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
1232
1233#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
1234#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
1235
1236#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
1237#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
1238#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
1239
1240#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
1241#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
1242#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
1243
1244#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
1245#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
1246#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
1247
1248#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
1249#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
1250#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
1251
1252#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
1253#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
1254
1255/**
1256 * Tx Shared Status Registers (TSSR)
1257 *
1258 * After stopping Tx DMA channel (writing 0 to
1259 * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
1260 * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle
1261 * (channel's buffers empty | no pending requests).
1262 *
1263 * Bit fields:
1264 * 31-24: 1 = Channel buffers empty (channel 7:0)
1265 * 23-16: 1 = No pending requests (channel 7:0)
1266 */
1267#define FH49_TSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xEA0)
1268#define FH49_TSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xEC0)
1269
1270#define FH49_TSSR_TX_STATUS_REG (FH49_TSSR_LOWER_BOUND + 0x010)
1271
1272/**
1273 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
1274 * 31: Indicates an address error when accessed to internal memory
1275 * uCode/driver must write "1" in order to clear this flag
1276 * 30: Indicates that Host did not send the expected number of dwords to FH
1277 * uCode/driver must write "1" in order to clear this flag
1278 * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
1279 * command was received from the scheduler while the TRB was already full
1280 * with previous command
1281 * uCode/driver must write "1" in order to clear this flag
1282 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
1283 * bit is set, it indicates that the FH has received a full indication
1284 * from the RTC TxFIFO and the current value of the TxCredit counter was
1285 * not equal to zero. This means that the credit mechanism was not
1286 * synchronized to the TxFIFO status
1287 * uCode/driver must write "1" in order to clear this flag
1288 */
1289#define FH49_TSSR_TX_ERROR_REG (FH49_TSSR_LOWER_BOUND + 0x018)
1290
1291#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
1292
1293/* Tx service channels */
1294#define FH49_SRVC_CHNL (9)
1295#define FH49_SRVC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9C8)
1296#define FH49_SRVC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
1297#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
1298 (FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
1299
1300#define FH49_TX_CHICKEN_BITS_REG (FH49_MEM_LOWER_BOUND + 0xE98)
1301/* Instruct FH to increment the retry count of a packet when
1302 * it is brought from the memory to TX-FIFO
1303 */
1304#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
1305
1306/* Keep Warm Size */
1307#define IL_KW_SIZE 0x1000 /* 4k */
1308
1309#endif /* __il_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index aef65cd47661..05bd375cb845 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,4 +1,4 @@
1config IWLWIFI_LEGACY 1config IWLEGACY
2 tristate 2 tristate
3 select FW_LOADER 3 select FW_LOADER
4 select NEW_LEDS 4 select NEW_LEDS
@@ -7,13 +7,13 @@ config IWLWIFI_LEGACY
7 select MAC80211_LEDS 7 select MAC80211_LEDS
8 8
9menu "Debugging Options" 9menu "Debugging Options"
10 depends on IWLWIFI_LEGACY 10 depends on IWLEGACY
11 11
12config IWLWIFI_LEGACY_DEBUG 12config IWLEGACY_DEBUG
13 bool "Enable full debugging output in 4965 and 3945 drivers" 13 bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
14 depends on IWLWIFI_LEGACY 14 depends on IWLEGACY
15 ---help--- 15 ---help---
16 This option will enable debug tracing output for the iwlwifilegacy 16 This option will enable debug tracing output for the iwlegacy
17 drivers. 17 drivers.
18 18
19 This will result in the kernel module being ~100k larger. You can 19 This will result in the kernel module being ~100k larger. You can
@@ -29,43 +29,26 @@ config IWLWIFI_LEGACY_DEBUG
29 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level 29 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
30 30
31 You can find the list of debug mask values in: 31 You can find the list of debug mask values in:
32 drivers/net/wireless/iwlwifilegacy/iwl-debug.h 32 drivers/net/wireless/iwlegacy/common.h
33 33
34 If this is your first time using this driver, you should say Y here 34 If this is your first time using this driver, you should say Y here
35 as the debug information can assist others in helping you resolve 35 as the debug information can assist others in helping you resolve
36 any problems you may encounter. 36 any problems you may encounter.
37 37
38config IWLWIFI_LEGACY_DEBUGFS 38config IWLEGACY_DEBUGFS
39 bool "4965 and 3945 debugfs support" 39 bool "iwlegacy (iwl 3945/4965) debugfs support"
40 depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS 40 depends on IWLEGACY && MAC80211_DEBUGFS
41 ---help--- 41 ---help---
42 Enable creation of debugfs files for the iwlwifilegacy drivers. This 42 Enable creation of debugfs files for the iwlegacy drivers. This
43 is a low-impact option that allows getting insight into the 43 is a low-impact option that allows getting insight into the
44 driver's state at runtime. 44 driver's state at runtime.
45 45
46config IWLWIFI_LEGACY_DEVICE_TRACING
47 bool "iwlwifilegacy legacy device access tracing"
48 depends on IWLWIFI_LEGACY
49 depends on EVENT_TRACING
50 help
51 Say Y here to trace all commands, including TX frames and IO
52 accesses, sent to the device. If you say yes, iwlwifilegacy will
53 register with the ftrace framework for event tracing and dump
54 all this information to the ringbuffer, you may need to
55 increase the ringbuffer size. See the ftrace documentation
56 for more information.
57
58 When tracing is not enabled, this option still has some
59 (though rather small) overhead.
60
61 If unsure, say Y so we can help you better when problems
62 occur.
63endmenu 46endmenu
64 47
65config IWL4965 48config IWL4965
66 tristate "Intel Wireless WiFi 4965AGN (iwl4965)" 49 tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
67 depends on PCI && MAC80211 50 depends on PCI && MAC80211
68 select IWLWIFI_LEGACY 51 select IWLEGACY
69 ---help--- 52 ---help---
70 This option enables support for 53 This option enables support for
71 54
@@ -93,7 +76,7 @@ config IWL4965
93config IWL3945 76config IWL3945
94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" 77 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
95 depends on PCI && MAC80211 78 depends on PCI && MAC80211
96 select IWLWIFI_LEGACY 79 select IWLEGACY
97 ---help--- 80 ---help---
98 Select to build the driver supporting the: 81 Select to build the driver supporting the:
99 82
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
index d56aeb38c211..c985a01a0731 100644
--- a/drivers/net/wireless/iwlegacy/Makefile
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -1,25 +1,17 @@
1obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o 1obj-$(CONFIG_IWLEGACY) += iwlegacy.o
2iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o 2iwlegacy-objs := common.o
3iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o 3iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
4iwl-legacy-objs += iwl-scan.o iwl-led.o
5iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
6iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
7 4
8iwl-legacy-objs += $(iwl-legacy-m) 5iwlegacy-objs += $(iwlegacy-m)
9
10CFLAGS_iwl-devtrace.o := -I$(src)
11 6
12# 4965 7# 4965
13obj-$(CONFIG_IWL4965) += iwl4965.o 8obj-$(CONFIG_IWL4965) += iwl4965.o
14iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o 9iwl4965-objs := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
15iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o 10iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
16iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
17iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
18iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
19 11
20# 3945 12# 3945
21obj-$(CONFIG_IWL3945) += iwl3945.o 13obj-$(CONFIG_IWL3945) += iwl3945.o
22iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o 14iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
23iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o 15iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
24 16
25ccflags-y += -D__CHECK_ENDIAN__ 17ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 89904054473f..25dd7d28d022 100644
--- a/drivers/net/wireless/iwlegacy/iwl-commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -60,100 +60,96 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions.
67 */
68 63
69#ifndef __iwl_legacy_commands_h__ 64#ifndef __il_commands_h__
70#define __iwl_legacy_commands_h__ 65#define __il_commands_h__
71 66
72struct iwl_priv; 67#include <linux/ieee80211.h>
73 68
74/* uCode version contains 4 values: Major/Minor/API/Serial */ 69struct il_priv;
75#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
76#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
77#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
78#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
79 70
71/* uCode version contains 4 values: Major/Minor/API/Serial */
72#define IL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
73#define IL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
74#define IL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
75#define IL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
80 76
81/* Tx rates */ 77/* Tx rates */
82#define IWL_CCK_RATES 4 78#define IL_CCK_RATES 4
83#define IWL_OFDM_RATES 8 79#define IL_OFDM_RATES 8
84#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES) 80#define IL_MAX_RATES (IL_CCK_RATES + IL_OFDM_RATES)
85 81
86enum { 82enum {
87 REPLY_ALIVE = 0x1, 83 N_ALIVE = 0x1,
88 REPLY_ERROR = 0x2, 84 N_ERROR = 0x2,
89 85
90 /* RXON and QOS commands */ 86 /* RXON and QOS commands */
91 REPLY_RXON = 0x10, 87 C_RXON = 0x10,
92 REPLY_RXON_ASSOC = 0x11, 88 C_RXON_ASSOC = 0x11,
93 REPLY_QOS_PARAM = 0x13, 89 C_QOS_PARAM = 0x13,
94 REPLY_RXON_TIMING = 0x14, 90 C_RXON_TIMING = 0x14,
95 91
96 /* Multi-Station support */ 92 /* Multi-Station support */
97 REPLY_ADD_STA = 0x18, 93 C_ADD_STA = 0x18,
98 REPLY_REMOVE_STA = 0x19, 94 C_REM_STA = 0x19,
99 95
100 /* Security */ 96 /* Security */
101 REPLY_WEPKEY = 0x20, 97 C_WEPKEY = 0x20,
102 98
103 /* RX, TX, LEDs */ 99 /* RX, TX, LEDs */
104 REPLY_3945_RX = 0x1b, /* 3945 only */ 100 N_3945_RX = 0x1b, /* 3945 only */
105 REPLY_TX = 0x1c, 101 C_TX = 0x1c,
106 REPLY_RATE_SCALE = 0x47, /* 3945 only */ 102 C_RATE_SCALE = 0x47, /* 3945 only */
107 REPLY_LEDS_CMD = 0x48, 103 C_LEDS = 0x48,
108 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ 104 C_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 */
109 105
110 /* 802.11h related */ 106 /* 802.11h related */
111 REPLY_CHANNEL_SWITCH = 0x72, 107 C_CHANNEL_SWITCH = 0x72,
112 CHANNEL_SWITCH_NOTIFICATION = 0x73, 108 N_CHANNEL_SWITCH = 0x73,
113 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74, 109 C_SPECTRUM_MEASUREMENT = 0x74,
114 SPECTRUM_MEASURE_NOTIFICATION = 0x75, 110 N_SPECTRUM_MEASUREMENT = 0x75,
115 111
116 /* Power Management */ 112 /* Power Management */
117 POWER_TABLE_CMD = 0x77, 113 C_POWER_TBL = 0x77,
118 PM_SLEEP_NOTIFICATION = 0x7A, 114 N_PM_SLEEP = 0x7A,
119 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B, 115 N_PM_DEBUG_STATS = 0x7B,
120 116
121 /* Scan commands and notifications */ 117 /* Scan commands and notifications */
122 REPLY_SCAN_CMD = 0x80, 118 C_SCAN = 0x80,
123 REPLY_SCAN_ABORT_CMD = 0x81, 119 C_SCAN_ABORT = 0x81,
124 SCAN_START_NOTIFICATION = 0x82, 120 N_SCAN_START = 0x82,
125 SCAN_RESULTS_NOTIFICATION = 0x83, 121 N_SCAN_RESULTS = 0x83,
126 SCAN_COMPLETE_NOTIFICATION = 0x84, 122 N_SCAN_COMPLETE = 0x84,
127 123
128 /* IBSS/AP commands */ 124 /* IBSS/AP commands */
129 BEACON_NOTIFICATION = 0x90, 125 N_BEACON = 0x90,
130 REPLY_TX_BEACON = 0x91, 126 C_TX_BEACON = 0x91,
131 127
132 /* Miscellaneous commands */ 128 /* Miscellaneous commands */
133 REPLY_TX_PWR_TABLE_CMD = 0x97, 129 C_TX_PWR_TBL = 0x97,
134 130
135 /* Bluetooth device coexistence config command */ 131 /* Bluetooth device coexistence config command */
136 REPLY_BT_CONFIG = 0x9b, 132 C_BT_CONFIG = 0x9b,
137 133
138 /* Statistics */ 134 /* Statistics */
139 REPLY_STATISTICS_CMD = 0x9c, 135 C_STATS = 0x9c,
140 STATISTICS_NOTIFICATION = 0x9d, 136 N_STATS = 0x9d,
141 137
142 /* RF-KILL commands and notifications */ 138 /* RF-KILL commands and notifications */
143 CARD_STATE_NOTIFICATION = 0xa1, 139 N_CARD_STATE = 0xa1,
144 140
145 /* Missed beacons notification */ 141 /* Missed beacons notification */
146 MISSED_BEACONS_NOTIFICATION = 0xa2, 142 N_MISSED_BEACONS = 0xa2,
147 143
148 REPLY_CT_KILL_CONFIG_CMD = 0xa4, 144 C_CT_KILL_CONFIG = 0xa4,
149 SENSITIVITY_CMD = 0xa8, 145 C_SENSITIVITY = 0xa8,
150 REPLY_PHY_CALIBRATION_CMD = 0xb0, 146 C_PHY_CALIBRATION = 0xb0,
151 REPLY_RX_PHY_CMD = 0xc0, 147 N_RX_PHY = 0xc0,
152 REPLY_RX_MPDU_CMD = 0xc1, 148 N_RX_MPDU = 0xc1,
153 REPLY_RX = 0xc3, 149 N_RX = 0xc3,
154 REPLY_COMPRESSED_BA = 0xc5, 150 N_COMPRESSED_BA = 0xc5,
155 151
156 REPLY_MAX = 0xff 152 IL_CN_MAX = 0xff
157}; 153};
158 154
159/****************************************************************************** 155/******************************************************************************
@@ -163,25 +159,25 @@ enum {
163 * 159 *
164 *****************************************************************************/ 160 *****************************************************************************/
165 161
166/* iwl_cmd_header flags value */ 162/* il_cmd_header flags value */
167#define IWL_CMD_FAILED_MSK 0x40 163#define IL_CMD_FAILED_MSK 0x40
168 164
169#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) 165#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
170#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) 166#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
171#define SEQ_TO_INDEX(s) ((s) & 0xff) 167#define SEQ_TO_IDX(s) ((s) & 0xff)
172#define INDEX_TO_SEQ(i) ((i) & 0xff) 168#define IDX_TO_SEQ(i) ((i) & 0xff)
173#define SEQ_HUGE_FRAME cpu_to_le16(0x4000) 169#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
174#define SEQ_RX_FRAME cpu_to_le16(0x8000) 170#define SEQ_RX_FRAME cpu_to_le16(0x8000)
175 171
176/** 172/**
177 * struct iwl_cmd_header 173 * struct il_cmd_header
178 * 174 *
179 * This header format appears in the beginning of each command sent from the 175 * This header format appears in the beginning of each command sent from the
180 * driver, and each response/notification received from uCode. 176 * driver, and each response/notification received from uCode.
181 */ 177 */
182struct iwl_cmd_header { 178struct il_cmd_header {
183 u8 cmd; /* Command ID: REPLY_RXON, etc. */ 179 u8 cmd; /* Command ID: C_RXON, etc. */
184 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ 180 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
185 /* 181 /*
186 * The driver sets up the sequence number to values of its choosing. 182 * The driver sets up the sequence number to values of its choosing.
187 * uCode does not use this value, but passes it back to the driver 183 * uCode does not use this value, but passes it back to the driver
@@ -192,29 +188,28 @@ struct iwl_cmd_header {
192 * There is one exception: uCode sets bit 15 when it originates 188 * There is one exception: uCode sets bit 15 when it originates
193 * the response/notification, i.e. when the response/notification 189 * the response/notification, i.e. when the response/notification
194 * is not a direct response to a command sent by the driver. For 190 * is not a direct response to a command sent by the driver. For
195 * example, uCode issues REPLY_3945_RX when it sends a received frame 191 * example, uCode issues N_3945_RX when it sends a received frame
196 * to the driver; it is not a direct response to any driver command. 192 * to the driver; it is not a direct response to any driver command.
197 * 193 *
198 * The Linux driver uses the following format: 194 * The Linux driver uses the following format:
199 * 195 *
200 * 0:7 tfd index - position within TX queue 196 * 0:7 tfd idx - position within TX queue
201 * 8:12 TX queue id 197 * 8:12 TX queue id
202 * 13 reserved 198 * 13 reserved
203 * 14 huge - driver sets this to indicate command is in the 199 * 14 huge - driver sets this to indicate command is in the
204 * 'huge' storage at the end of the command buffers 200 * 'huge' storage at the end of the command buffers
205 * 15 unsolicited RX or uCode-originated notification 201 * 15 unsolicited RX or uCode-originated notification
206 */ 202 */
207 __le16 sequence; 203 __le16 sequence;
208 204
209 /* command or response/notification data follows immediately */ 205 /* command or response/notification data follows immediately */
210 u8 data[0]; 206 u8 data[0];
211} __packed; 207} __packed;
212 208
213
214/** 209/**
215 * struct iwl3945_tx_power 210 * struct il3945_tx_power
216 * 211 *
217 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH 212 * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH
218 * 213 *
219 * Each entry contains two values: 214 * Each entry contains two values:
220 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained 215 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
@@ -223,21 +218,21 @@ struct iwl_cmd_header {
223 * 2) Radio gain. This sets the analog gain of the radio Tx path. 218 * 2) Radio gain. This sets the analog gain of the radio Tx path.
224 * It is a coarser setting, and behaves in a logarithmic (dB) fashion. 219 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
225 * 220 *
226 * Driver obtains values from struct iwl3945_tx_power power_gain_table[][]. 221 * Driver obtains values from struct il3945_tx_power power_gain_table[][].
227 */ 222 */
228struct iwl3945_tx_power { 223struct il3945_tx_power {
229 u8 tx_gain; /* gain for analog radio */ 224 u8 tx_gain; /* gain for analog radio */
230 u8 dsp_atten; /* gain for DSP */ 225 u8 dsp_atten; /* gain for DSP */
231} __packed; 226} __packed;
232 227
233/** 228/**
234 * struct iwl3945_power_per_rate 229 * struct il3945_power_per_rate
235 * 230 *
236 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 231 * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH
237 */ 232 */
238struct iwl3945_power_per_rate { 233struct il3945_power_per_rate {
239 u8 rate; /* plcp */ 234 u8 rate; /* plcp */
240 struct iwl3945_tx_power tpc; 235 struct il3945_tx_power tpc;
241 u8 reserved; 236 u8 reserved;
242} __packed; 237} __packed;
243 238
@@ -245,10 +240,10 @@ struct iwl3945_power_per_rate {
245 * iwl4965 rate_n_flags bit fields 240 * iwl4965 rate_n_flags bit fields
246 * 241 *
247 * rate_n_flags format is used in following iwl4965 commands: 242 * rate_n_flags format is used in following iwl4965 commands:
248 * REPLY_RX (response only) 243 * N_RX (response only)
249 * REPLY_RX_MPDU (response only) 244 * N_RX_MPDU (response only)
250 * REPLY_TX (both command and response) 245 * C_TX (both command and response)
251 * REPLY_TX_LINK_QUALITY_CMD 246 * C_TX_LINK_QUALITY_CMD
252 * 247 *
253 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"): 248 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
254 * 2-0: 0) 6 Mbps 249 * 2-0: 0) 6 Mbps
@@ -326,17 +321,17 @@ struct iwl3945_power_per_rate {
326#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK) 321#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
327#define RATE_ANT_NUM 3 322#define RATE_ANT_NUM 3
328 323
329#define POWER_TABLE_NUM_ENTRIES 33 324#define POWER_TBL_NUM_ENTRIES 33
330#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 325#define POWER_TBL_NUM_HT_OFDM_ENTRIES 32
331#define POWER_TABLE_CCK_ENTRY 32 326#define POWER_TBL_CCK_ENTRY 32
332 327
333#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 328#define IL_PWR_NUM_HT_OFDM_ENTRIES 24
334#define IWL_PWR_CCK_ENTRIES 2 329#define IL_PWR_CCK_ENTRIES 2
335 330
336/** 331/**
337 * union iwl4965_tx_power_dual_stream 332 * union il4965_tx_power_dual_stream
338 * 333 *
339 * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 334 * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH
340 * Use __le32 version (struct tx_power_dual_stream) when building command. 335 * Use __le32 version (struct tx_power_dual_stream) when building command.
341 * 336 *
342 * Driver provides radio gain and DSP attenuation settings to device in pairs, 337 * Driver provides radio gain and DSP attenuation settings to device in pairs,
@@ -347,9 +342,9 @@ struct iwl3945_power_per_rate {
347 * For MIMO rates, one value may be different from the other, 342 * For MIMO rates, one value may be different from the other,
348 * in order to balance the Tx output between the two transmitters. 343 * in order to balance the Tx output between the two transmitters.
349 * 344 *
350 * See more details in doc for TXPOWER in iwl-4965-hw.h. 345 * See more details in doc for TXPOWER in 4965.h.
351 */ 346 */
352union iwl4965_tx_power_dual_stream { 347union il4965_tx_power_dual_stream {
353 struct { 348 struct {
354 u8 radio_tx_gain[2]; 349 u8 radio_tx_gain[2];
355 u8 dsp_predis_atten[2]; 350 u8 dsp_predis_atten[2];
@@ -360,21 +355,21 @@ union iwl4965_tx_power_dual_stream {
360/** 355/**
361 * struct tx_power_dual_stream 356 * struct tx_power_dual_stream
362 * 357 *
363 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 358 * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH
364 * 359 *
365 * Same format as iwl_tx_power_dual_stream, but __le32 360 * Same format as il_tx_power_dual_stream, but __le32
366 */ 361 */
367struct tx_power_dual_stream { 362struct tx_power_dual_stream {
368 __le32 dw; 363 __le32 dw;
369} __packed; 364} __packed;
370 365
371/** 366/**
372 * struct iwl4965_tx_power_db 367 * struct il4965_tx_power_db
373 * 368 *
374 * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 369 * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH
375 */ 370 */
376struct iwl4965_tx_power_db { 371struct il4965_tx_power_db {
377 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; 372 struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES];
378} __packed; 373} __packed;
379 374
380/****************************************************************************** 375/******************************************************************************
@@ -387,7 +382,7 @@ struct iwl4965_tx_power_db {
387#define INITIALIZE_SUBTYPE (9) 382#define INITIALIZE_SUBTYPE (9)
388 383
389/* 384/*
390 * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command) 385 * ("Initialize") N_ALIVE = 0x1 (response only, not a command)
391 * 386 *
392 * uCode issues this "initialize alive" notification once the initialization 387 * uCode issues this "initialize alive" notification once the initialization
393 * uCode image has completed its work, and is ready to load the runtime image. 388 * uCode image has completed its work, and is ready to load the runtime image.
@@ -410,7 +405,7 @@ struct iwl4965_tx_power_db {
410 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation, 405 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
411 * for each of 5 frequency ranges. 406 * for each of 5 frequency ranges.
412 */ 407 */
413struct iwl_init_alive_resp { 408struct il_init_alive_resp {
414 u8 ucode_minor; 409 u8 ucode_minor;
415 u8 ucode_major; 410 u8 ucode_major;
416 __le16 reserved1; 411 __le16 reserved1;
@@ -433,9 +428,8 @@ struct iwl_init_alive_resp {
433 * 2 Tx chains */ 428 * 2 Tx chains */
434} __packed; 429} __packed;
435 430
436
437/** 431/**
438 * REPLY_ALIVE = 0x1 (response only, not a command) 432 * N_ALIVE = 0x1 (response only, not a command)
439 * 433 *
440 * uCode issues this "alive" notification once the runtime image is ready 434 * uCode issues this "alive" notification once the runtime image is ready
441 * to receive commands from the driver. This is the *second* "alive" 435 * to receive commands from the driver. This is the *second* "alive"
@@ -454,7 +448,7 @@ struct iwl_init_alive_resp {
454 * __le32 log_size; log capacity (in number of entries) 448 * __le32 log_size; log capacity (in number of entries)
455 * __le32 type; (1) timestamp with each entry, (0) no timestamp 449 * __le32 type; (1) timestamp with each entry, (0) no timestamp
456 * __le32 wraps; # times uCode has wrapped to top of circular buffer 450 * __le32 wraps; # times uCode has wrapped to top of circular buffer
457 * __le32 write_index; next circular buffer entry that uCode would fill 451 * __le32 write_idx; next circular buffer entry that uCode would fill
458 * 452 *
459 * The header is followed by the circular buffer of log entries. Entries 453 * The header is followed by the circular buffer of log entries. Entries
460 * with timestamps have the following format: 454 * with timestamps have the following format:
@@ -511,13 +505,13 @@ struct iwl_init_alive_resp {
511 * The Linux driver can print both logs to the system log when a uCode error 505 * The Linux driver can print both logs to the system log when a uCode error
512 * occurs. 506 * occurs.
513 */ 507 */
514struct iwl_alive_resp { 508struct il_alive_resp {
515 u8 ucode_minor; 509 u8 ucode_minor;
516 u8 ucode_major; 510 u8 ucode_major;
517 __le16 reserved1; 511 __le16 reserved1;
518 u8 sw_rev[8]; 512 u8 sw_rev[8];
519 u8 ver_type; 513 u8 ver_type;
520 u8 ver_subtype; /* not "9" for runtime alive */ 514 u8 ver_subtype; /* not "9" for runtime alive */
521 __le16 reserved2; 515 __le16 reserved2;
522 __le32 log_event_table_ptr; /* SRAM address for event log */ 516 __le32 log_event_table_ptr; /* SRAM address for event log */
523 __le32 error_event_table_ptr; /* SRAM address for error log */ 517 __le32 error_event_table_ptr; /* SRAM address for error log */
@@ -526,9 +520,9 @@ struct iwl_alive_resp {
526} __packed; 520} __packed;
527 521
528/* 522/*
529 * REPLY_ERROR = 0x2 (response only, not a command) 523 * N_ERROR = 0x2 (response only, not a command)
530 */ 524 */
531struct iwl_error_resp { 525struct il_error_resp {
532 __le32 error_type; 526 __le32 error_type;
533 u8 cmd_id; 527 u8 cmd_id;
534 u8 reserved1; 528 u8 reserved1;
@@ -554,7 +548,6 @@ enum {
554 RXON_DEV_TYPE_SNIFFER = 6, 548 RXON_DEV_TYPE_SNIFFER = 6,
555}; 549};
556 550
557
558#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0) 551#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
559#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0) 552#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
560#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1) 553#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
@@ -593,7 +586,6 @@ enum {
593* (according to ON_AIR deassertion) */ 586* (according to ON_AIR deassertion) */
594#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15) 587#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
595 588
596
597/* HT flags */ 589/* HT flags */
598#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) 590#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
599#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22) 591#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
@@ -640,7 +632,7 @@ enum {
640#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) 632#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
641 633
642/** 634/**
643 * REPLY_RXON = 0x10 (command, has simple generic response) 635 * C_RXON = 0x10 (command, has simple generic response)
644 * 636 *
645 * RXON tunes the radio tuner to a service channel, and sets up a number 637 * RXON tunes the radio tuner to a service channel, and sets up a number
646 * of parameters that are used primarily for Rx, but also for Tx operations. 638 * of parameters that are used primarily for Rx, but also for Tx operations.
@@ -653,11 +645,11 @@ enum {
653 * channel. 645 * channel.
654 * 646 *
655 * NOTE: All RXONs wipe clean the internal txpower table. Driver must 647 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
656 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10), 648 * issue a new C_TX_PWR_TBL after each C_RXON (0x10),
657 * regardless of whether RXON_FILTER_ASSOC_MSK is set. 649 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
658 */ 650 */
659 651
660struct iwl3945_rxon_cmd { 652struct il3945_rxon_cmd {
661 u8 node_addr[6]; 653 u8 node_addr[6];
662 __le16 reserved1; 654 __le16 reserved1;
663 u8 bssid_addr[6]; 655 u8 bssid_addr[6];
@@ -676,7 +668,7 @@ struct iwl3945_rxon_cmd {
676 __le16 reserved5; 668 __le16 reserved5;
677} __packed; 669} __packed;
678 670
679struct iwl4965_rxon_cmd { 671struct il4965_rxon_cmd {
680 u8 node_addr[6]; 672 u8 node_addr[6];
681 __le16 reserved1; 673 __le16 reserved1;
682 u8 bssid_addr[6]; 674 u8 bssid_addr[6];
@@ -699,7 +691,7 @@ struct iwl4965_rxon_cmd {
699/* Create a common rxon cmd which will be typecast into the 3945 or 4965 691/* Create a common rxon cmd which will be typecast into the 3945 or 4965
700 * specific rxon cmd, depending on where it is called from. 692 * specific rxon cmd, depending on where it is called from.
701 */ 693 */
702struct iwl_legacy_rxon_cmd { 694struct il_rxon_cmd {
703 u8 node_addr[6]; 695 u8 node_addr[6];
704 __le16 reserved1; 696 __le16 reserved1;
705 u8 bssid_addr[6]; 697 u8 bssid_addr[6];
@@ -721,11 +713,10 @@ struct iwl_legacy_rxon_cmd {
721 u8 reserved5; 713 u8 reserved5;
722} __packed; 714} __packed;
723 715
724
725/* 716/*
726 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) 717 * C_RXON_ASSOC = 0x11 (command, has simple generic response)
727 */ 718 */
728struct iwl3945_rxon_assoc_cmd { 719struct il3945_rxon_assoc_cmd {
729 __le32 flags; 720 __le32 flags;
730 __le32 filter_flags; 721 __le32 filter_flags;
731 u8 ofdm_basic_rates; 722 u8 ofdm_basic_rates;
@@ -733,7 +724,7 @@ struct iwl3945_rxon_assoc_cmd {
733 __le16 reserved; 724 __le16 reserved;
734} __packed; 725} __packed;
735 726
736struct iwl4965_rxon_assoc_cmd { 727struct il4965_rxon_assoc_cmd {
737 __le32 flags; 728 __le32 flags;
738 __le32 filter_flags; 729 __le32 filter_flags;
739 u8 ofdm_basic_rates; 730 u8 ofdm_basic_rates;
@@ -744,17 +735,17 @@ struct iwl4965_rxon_assoc_cmd {
744 __le16 reserved; 735 __le16 reserved;
745} __packed; 736} __packed;
746 737
747#define IWL_CONN_MAX_LISTEN_INTERVAL 10 738#define IL_CONN_MAX_LISTEN_INTERVAL 10
748#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ 739#define IL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
749#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */ 740#define IL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
750 741
751/* 742/*
752 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) 743 * C_RXON_TIMING = 0x14 (command, has simple generic response)
753 */ 744 */
754struct iwl_rxon_time_cmd { 745struct il_rxon_time_cmd {
755 __le64 timestamp; 746 __le64 timestamp;
756 __le16 beacon_interval; 747 __le16 beacon_interval;
757 __le16 atim_window; 748 __le16 atim_win;
758 __le32 beacon_init_val; 749 __le32 beacon_init_val;
759 __le16 listen_interval; 750 __le16 listen_interval;
760 u8 dtim_period; 751 u8 dtim_period;
@@ -762,32 +753,32 @@ struct iwl_rxon_time_cmd {
762} __packed; 753} __packed;
763 754
764/* 755/*
765 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) 756 * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
766 */ 757 */
767struct iwl3945_channel_switch_cmd { 758struct il3945_channel_switch_cmd {
768 u8 band; 759 u8 band;
769 u8 expect_beacon; 760 u8 expect_beacon;
770 __le16 channel; 761 __le16 channel;
771 __le32 rxon_flags; 762 __le32 rxon_flags;
772 __le32 rxon_filter_flags; 763 __le32 rxon_filter_flags;
773 __le32 switch_time; 764 __le32 switch_time;
774 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 765 struct il3945_power_per_rate power[IL_MAX_RATES];
775} __packed; 766} __packed;
776 767
777struct iwl4965_channel_switch_cmd { 768struct il4965_channel_switch_cmd {
778 u8 band; 769 u8 band;
779 u8 expect_beacon; 770 u8 expect_beacon;
780 __le16 channel; 771 __le16 channel;
781 __le32 rxon_flags; 772 __le32 rxon_flags;
782 __le32 rxon_filter_flags; 773 __le32 rxon_filter_flags;
783 __le32 switch_time; 774 __le32 switch_time;
784 struct iwl4965_tx_power_db tx_power; 775 struct il4965_tx_power_db tx_power;
785} __packed; 776} __packed;
786 777
787/* 778/*
788 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) 779 * N_CHANNEL_SWITCH = 0x73 (notification only, not a command)
789 */ 780 */
790struct iwl_csa_notification { 781struct il_csa_notification {
791 __le16 band; 782 __le16 band;
792 __le16 channel; 783 __le16 channel;
793 __le32 status; /* 0 - OK, 1 - fail */ 784 __le32 status; /* 0 - OK, 1 - fail */
@@ -800,22 +791,22 @@ struct iwl_csa_notification {
800 *****************************************************************************/ 791 *****************************************************************************/
801 792
802/** 793/**
803 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM 794 * struct il_ac_qos -- QOS timing params for C_QOS_PARAM
804 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd 795 * One for each of 4 EDCA access categories in struct il_qosparam_cmd
805 * 796 *
806 * @cw_min: Contention window, start value in numbers of slots. 797 * @cw_min: Contention win, start value in numbers of slots.
807 * Should be a power-of-2, minus 1. Device's default is 0x0f. 798 * Should be a power-of-2, minus 1. Device's default is 0x0f.
808 * @cw_max: Contention window, max value in numbers of slots. 799 * @cw_max: Contention win, max value in numbers of slots.
809 * Should be a power-of-2, minus 1. Device's default is 0x3f. 800 * Should be a power-of-2, minus 1. Device's default is 0x3f.
810 * @aifsn: Number of slots in Arbitration Interframe Space (before 801 * @aifsn: Number of slots in Arbitration Interframe Space (before
811 * performing random backoff timing prior to Tx). Device default 1. 802 * performing random backoff timing prior to Tx). Device default 1.
812 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. 803 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
813 * 804 *
814 * Device will automatically increase contention window by (2*CW) + 1 for each 805 * Device will automatically increase contention win by (2*CW) + 1 for each
815 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW 806 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
816 * value, to cap the CW value. 807 * value, to cap the CW value.
817 */ 808 */
818struct iwl_ac_qos { 809struct il_ac_qos {
819 __le16 cw_min; 810 __le16 cw_min;
820 __le16 cw_max; 811 __le16 cw_max;
821 u8 aifsn; 812 u8 aifsn;
@@ -832,14 +823,14 @@ struct iwl_ac_qos {
832#define AC_NUM 4 823#define AC_NUM 4
833 824
834/* 825/*
835 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) 826 * C_QOS_PARAM = 0x13 (command, has simple generic response)
836 * 827 *
837 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs 828 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
838 * 0: Background, 1: Best Effort, 2: Video, 3: Voice. 829 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
839 */ 830 */
840struct iwl_qosparam_cmd { 831struct il_qosparam_cmd {
841 __le32 qos_flags; 832 __le32 qos_flags;
842 struct iwl_ac_qos ac[AC_NUM]; 833 struct il_ac_qos ac[AC_NUM];
843} __packed; 834} __packed;
844 835
845/****************************************************************************** 836/******************************************************************************
@@ -852,15 +843,15 @@ struct iwl_qosparam_cmd {
852 */ 843 */
853 844
854/* Special, dedicated locations within device's station table */ 845/* Special, dedicated locations within device's station table */
855#define IWL_AP_ID 0 846#define IL_AP_ID 0
856#define IWL_STA_ID 2 847#define IL_STA_ID 2
857#define IWL3945_BROADCAST_ID 24 848#define IL3945_BROADCAST_ID 24
858#define IWL3945_STATION_COUNT 25 849#define IL3945_STATION_COUNT 25
859#define IWL4965_BROADCAST_ID 31 850#define IL4965_BROADCAST_ID 31
860#define IWL4965_STATION_COUNT 32 851#define IL4965_STATION_COUNT 32
861 852
862#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ 853#define IL_STATION_COUNT 32 /* MAX(3945,4965) */
863#define IWL_INVALID_STATION 255 854#define IL_INVALID_STATION 255
864 855
865#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) 856#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
866#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) 857#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -901,11 +892,11 @@ struct iwl_qosparam_cmd {
901#define STA_MODIFY_DELBA_TID_MSK 0x10 892#define STA_MODIFY_DELBA_TID_MSK 0x10
902#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 893#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
903 894
904/* Receiver address (actually, Rx station's index into station table), 895/* Receiver address (actually, Rx station's idx into station table),
905 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 896 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
906#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 897#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
907 898
908struct iwl4965_keyinfo { 899struct il4965_keyinfo {
909 __le16 key_flags; 900 __le16 key_flags;
910 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ 901 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
911 u8 reserved1; 902 u8 reserved1;
@@ -918,12 +909,12 @@ struct iwl4965_keyinfo {
918/** 909/**
919 * struct sta_id_modify 910 * struct sta_id_modify
920 * @addr[ETH_ALEN]: station's MAC address 911 * @addr[ETH_ALEN]: station's MAC address
921 * @sta_id: index of station in uCode's station table 912 * @sta_id: idx of station in uCode's station table
922 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change 913 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
923 * 914 *
924 * Driver selects unused table index when adding new station, 915 * Driver selects unused table idx when adding new station,
925 * or the index to a pre-existing station entry when modifying that station. 916 * or the idx to a pre-existing station entry when modifying that station.
926 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP). 917 * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP).
927 * 918 *
928 * modify_mask flags select which parameters to modify vs. leave alone. 919 * modify_mask flags select which parameters to modify vs. leave alone.
929 */ 920 */
@@ -936,15 +927,15 @@ struct sta_id_modify {
936} __packed; 927} __packed;
937 928
938/* 929/*
939 * REPLY_ADD_STA = 0x18 (command) 930 * C_ADD_STA = 0x18 (command)
940 * 931 *
941 * The device contains an internal table of per-station information, 932 * The device contains an internal table of per-station information,
942 * with info on security keys, aggregation parameters, and Tx rates for 933 * with info on security keys, aggregation parameters, and Tx rates for
943 * initial Tx attempt and any retries (4965 devices uses 934 * initial Tx attempt and any retries (4965 devices uses
944 * REPLY_TX_LINK_QUALITY_CMD, 935 * C_TX_LINK_QUALITY_CMD,
945 * 3945 uses REPLY_RATE_SCALE to set up rate tables). 936 * 3945 uses C_RATE_SCALE to set up rate tables).
946 * 937 *
947 * REPLY_ADD_STA sets up the table entry for one station, either creating 938 * C_ADD_STA sets up the table entry for one station, either creating
948 * a new entry, or modifying a pre-existing one. 939 * a new entry, or modifying a pre-existing one.
949 * 940 *
950 * NOTE: RXON command (without "associated" bit set) wipes the station table 941 * NOTE: RXON command (without "associated" bit set) wipes the station table
@@ -954,20 +945,20 @@ struct sta_id_modify {
954 * their own txpower/rate setup data). 945 * their own txpower/rate setup data).
955 * 946 *
956 * When getting started on a new channel, driver must set up the 947 * When getting started on a new channel, driver must set up the
957 * IWL_BROADCAST_ID entry (last entry in the table). For a client 948 * IL_BROADCAST_ID entry (last entry in the table). For a client
958 * station in a BSS, once an AP is selected, driver sets up the AP STA 949 * station in a BSS, once an AP is selected, driver sets up the AP STA
959 * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP 950 * in the IL_AP_ID entry (1st entry in the table). BROADCAST and AP
960 * are all that are needed for a BSS client station. If the device is 951 * are all that are needed for a BSS client station. If the device is
961 * used as AP, or in an IBSS network, driver must set up station table 952 * used as AP, or in an IBSS network, driver must set up station table
962 * entries for all STAs in network, starting with index IWL_STA_ID. 953 * entries for all STAs in network, starting with idx IL_STA_ID.
963 */ 954 */
964 955
965struct iwl3945_addsta_cmd { 956struct il3945_addsta_cmd {
966 u8 mode; /* 1: modify existing, 0: add new station */ 957 u8 mode; /* 1: modify existing, 0: add new station */
967 u8 reserved[3]; 958 u8 reserved[3];
968 struct sta_id_modify sta; 959 struct sta_id_modify sta;
969 struct iwl4965_keyinfo key; 960 struct il4965_keyinfo key;
970 __le32 station_flags; /* STA_FLG_* */ 961 __le32 station_flags; /* STA_FLG_* */
971 __le32 station_flags_msk; /* STA_FLG_* */ 962 __le32 station_flags_msk; /* STA_FLG_* */
972 963
973 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) 964 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -990,12 +981,12 @@ struct iwl3945_addsta_cmd {
990 __le16 add_immediate_ba_ssn; 981 __le16 add_immediate_ba_ssn;
991} __packed; 982} __packed;
992 983
993struct iwl4965_addsta_cmd { 984struct il4965_addsta_cmd {
994 u8 mode; /* 1: modify existing, 0: add new station */ 985 u8 mode; /* 1: modify existing, 0: add new station */
995 u8 reserved[3]; 986 u8 reserved[3];
996 struct sta_id_modify sta; 987 struct sta_id_modify sta;
997 struct iwl4965_keyinfo key; 988 struct il4965_keyinfo key;
998 __le32 station_flags; /* STA_FLG_* */ 989 __le32 station_flags; /* STA_FLG_* */
999 __le32 station_flags_msk; /* STA_FLG_* */ 990 __le32 station_flags_msk; /* STA_FLG_* */
1000 991
1001 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) 992 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1003,7 +994,7 @@ struct iwl4965_addsta_cmd {
1003 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ 994 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1004 __le16 tid_disable_tx; 995 __le16 tid_disable_tx;
1005 996
1006 __le16 reserved1; 997 __le16 reserved1;
1007 998
1008 /* TID for which to add block-ack support. 999 /* TID for which to add block-ack support.
1009 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ 1000 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1028,12 +1019,12 @@ struct iwl4965_addsta_cmd {
1028} __packed; 1019} __packed;
1029 1020
1030/* Wrapper struct for 3945 and 4965 addsta_cmd structures */ 1021/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
1031struct iwl_legacy_addsta_cmd { 1022struct il_addsta_cmd {
1032 u8 mode; /* 1: modify existing, 0: add new station */ 1023 u8 mode; /* 1: modify existing, 0: add new station */
1033 u8 reserved[3]; 1024 u8 reserved[3];
1034 struct sta_id_modify sta; 1025 struct sta_id_modify sta;
1035 struct iwl4965_keyinfo key; 1026 struct il4965_keyinfo key;
1036 __le32 station_flags; /* STA_FLG_* */ 1027 __le32 station_flags; /* STA_FLG_* */
1037 __le32 station_flags_msk; /* STA_FLG_* */ 1028 __le32 station_flags_msk; /* STA_FLG_* */
1038 1029
1039 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) 1030 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1041,7 +1032,7 @@ struct iwl_legacy_addsta_cmd {
1041 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ 1032 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1042 __le16 tid_disable_tx; 1033 __le16 tid_disable_tx;
1043 1034
1044 __le16 rate_n_flags; /* 3945 only */ 1035 __le16 rate_n_flags; /* 3945 only */
1045 1036
1046 /* TID for which to add block-ack support. 1037 /* TID for which to add block-ack support.
1047 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ 1038 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1065,51 +1056,50 @@ struct iwl_legacy_addsta_cmd {
1065 __le16 reserved2; 1056 __le16 reserved2;
1066} __packed; 1057} __packed;
1067 1058
1068
1069#define ADD_STA_SUCCESS_MSK 0x1 1059#define ADD_STA_SUCCESS_MSK 0x1
1070#define ADD_STA_NO_ROOM_IN_TABLE 0x2 1060#define ADD_STA_NO_ROOM_IN_TBL 0x2
1071#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 1061#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
1072#define ADD_STA_MODIFY_NON_EXIST_STA 0x8 1062#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
1073/* 1063/*
1074 * REPLY_ADD_STA = 0x18 (response) 1064 * C_ADD_STA = 0x18 (response)
1075 */ 1065 */
1076struct iwl_add_sta_resp { 1066struct il_add_sta_resp {
1077 u8 status; /* ADD_STA_* */ 1067 u8 status; /* ADD_STA_* */
1078} __packed; 1068} __packed;
1079 1069
1080#define REM_STA_SUCCESS_MSK 0x1 1070#define REM_STA_SUCCESS_MSK 0x1
1081/* 1071/*
1082 * REPLY_REM_STA = 0x19 (response) 1072 * C_REM_STA = 0x19 (response)
1083 */ 1073 */
1084struct iwl_rem_sta_resp { 1074struct il_rem_sta_resp {
1085 u8 status; 1075 u8 status;
1086} __packed; 1076} __packed;
1087 1077
1088/* 1078/*
1089 * REPLY_REM_STA = 0x19 (command) 1079 * C_REM_STA = 0x19 (command)
1090 */ 1080 */
1091struct iwl_rem_sta_cmd { 1081struct il_rem_sta_cmd {
1092 u8 num_sta; /* number of removed stations */ 1082 u8 num_sta; /* number of removed stations */
1093 u8 reserved[3]; 1083 u8 reserved[3];
1094 u8 addr[ETH_ALEN]; /* MAC addr of the first station */ 1084 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
1095 u8 reserved2[2]; 1085 u8 reserved2[2];
1096} __packed; 1086} __packed;
1097 1087
1098#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0)) 1088#define IL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
1099#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1)) 1089#define IL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
1100#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2)) 1090#define IL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
1101#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3)) 1091#define IL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
1102#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) 1092#define IL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
1103 1093
1104#define IWL_DROP_SINGLE 0 1094#define IL_DROP_SINGLE 0
1105#define IWL_DROP_SELECTED 1 1095#define IL_DROP_SELECTED 1
1106#define IWL_DROP_ALL 2 1096#define IL_DROP_ALL 2
1107 1097
1108/* 1098/*
1109 * REPLY_WEP_KEY = 0x20 1099 * REPLY_WEP_KEY = 0x20
1110 */ 1100 */
1111struct iwl_wep_key { 1101struct il_wep_key {
1112 u8 key_index; 1102 u8 key_idx;
1113 u8 key_offset; 1103 u8 key_offset;
1114 u8 reserved1[2]; 1104 u8 reserved1[2];
1115 u8 key_size; 1105 u8 key_size;
@@ -1117,12 +1107,12 @@ struct iwl_wep_key {
1117 u8 key[16]; 1107 u8 key[16];
1118} __packed; 1108} __packed;
1119 1109
1120struct iwl_wep_cmd { 1110struct il_wep_cmd {
1121 u8 num_keys; 1111 u8 num_keys;
1122 u8 global_key_type; 1112 u8 global_key_type;
1123 u8 flags; 1113 u8 flags;
1124 u8 reserved; 1114 u8 reserved;
1125 struct iwl_wep_key key[0]; 1115 struct il_wep_key key[0];
1126} __packed; 1116} __packed;
1127 1117
1128#define WEP_KEY_WEP_TYPE 1 1118#define WEP_KEY_WEP_TYPE 1
@@ -1168,8 +1158,7 @@ struct iwl_wep_cmd {
1168#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) 1158#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
1169#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) 1159#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
1170 1160
1171 1161struct il3945_rx_frame_stats {
1172struct iwl3945_rx_frame_stats {
1173 u8 phy_count; 1162 u8 phy_count;
1174 u8 id; 1163 u8 id;
1175 u8 rssi; 1164 u8 rssi;
@@ -1179,7 +1168,7 @@ struct iwl3945_rx_frame_stats {
1179 u8 payload[0]; 1168 u8 payload[0];
1180} __packed; 1169} __packed;
1181 1170
1182struct iwl3945_rx_frame_hdr { 1171struct il3945_rx_frame_hdr {
1183 __le16 channel; 1172 __le16 channel;
1184 __le16 phy_flags; 1173 __le16 phy_flags;
1185 u8 reserved1; 1174 u8 reserved1;
@@ -1188,73 +1177,71 @@ struct iwl3945_rx_frame_hdr {
1188 u8 payload[0]; 1177 u8 payload[0];
1189} __packed; 1178} __packed;
1190 1179
1191struct iwl3945_rx_frame_end { 1180struct il3945_rx_frame_end {
1192 __le32 status; 1181 __le32 status;
1193 __le64 timestamp; 1182 __le64 timestamp;
1194 __le32 beacon_timestamp; 1183 __le32 beacon_timestamp;
1195} __packed; 1184} __packed;
1196 1185
1197/* 1186/*
1198 * REPLY_3945_RX = 0x1b (response only, not a command) 1187 * N_3945_RX = 0x1b (response only, not a command)
1199 * 1188 *
1200 * NOTE: DO NOT dereference from casts to this structure 1189 * NOTE: DO NOT dereference from casts to this structure
1201 * It is provided only for calculating minimum data set size. 1190 * It is provided only for calculating minimum data set size.
1202 * The actual offsets of the hdr and end are dynamic based on 1191 * The actual offsets of the hdr and end are dynamic based on
1203 * stats.phy_count 1192 * stats.phy_count
1204 */ 1193 */
1205struct iwl3945_rx_frame { 1194struct il3945_rx_frame {
1206 struct iwl3945_rx_frame_stats stats; 1195 struct il3945_rx_frame_stats stats;
1207 struct iwl3945_rx_frame_hdr hdr; 1196 struct il3945_rx_frame_hdr hdr;
1208 struct iwl3945_rx_frame_end end; 1197 struct il3945_rx_frame_end end;
1209} __packed; 1198} __packed;
1210 1199
1211#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) 1200#define IL39_RX_FRAME_SIZE (4 + sizeof(struct il3945_rx_frame))
1212 1201
1213/* Fixed (non-configurable) rx data from phy */ 1202/* Fixed (non-configurable) rx data from phy */
1214 1203
1215#define IWL49_RX_RES_PHY_CNT 14 1204#define IL49_RX_RES_PHY_CNT 14
1216#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4) 1205#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
1217#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70) 1206#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
1218#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ 1207#define IL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
1219#define IWL49_AGC_DB_POS (7) 1208#define IL49_AGC_DB_POS (7)
1220struct iwl4965_rx_non_cfg_phy { 1209struct il4965_rx_non_cfg_phy {
1221 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ 1210 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
1222 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ 1211 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
1223 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ 1212 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
1224 u8 pad[0]; 1213 u8 pad[0];
1225} __packed; 1214} __packed;
1226 1215
1227
1228/* 1216/*
1229 * REPLY_RX = 0xc3 (response only, not a command) 1217 * N_RX = 0xc3 (response only, not a command)
1230 * Used only for legacy (non 11n) frames. 1218 * Used only for legacy (non 11n) frames.
1231 */ 1219 */
1232struct iwl_rx_phy_res { 1220struct il_rx_phy_res {
1233 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ 1221 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
1234 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ 1222 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
1235 u8 stat_id; /* configurable DSP phy data set ID */ 1223 u8 stat_id; /* configurable DSP phy data set ID */
1236 u8 reserved1; 1224 u8 reserved1;
1237 __le64 timestamp; /* TSF at on air rise */ 1225 __le64 timestamp; /* TSF at on air rise */
1238 __le32 beacon_time_stamp; /* beacon at on-air rise */ 1226 __le32 beacon_time_stamp; /* beacon at on-air rise */
1239 __le16 phy_flags; /* general phy flags: band, modulation, ... */ 1227 __le16 phy_flags; /* general phy flags: band, modulation, ... */
1240 __le16 channel; /* channel number */ 1228 __le16 channel; /* channel number */
1241 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ 1229 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1242 __le32 rate_n_flags; /* RATE_MCS_* */ 1230 __le32 rate_n_flags; /* RATE_MCS_* */
1243 __le16 byte_count; /* frame's byte-count */ 1231 __le16 byte_count; /* frame's byte-count */
1244 __le16 frame_time; /* frame's time on the air */ 1232 __le16 frame_time; /* frame's time on the air */
1245} __packed; 1233} __packed;
1246 1234
1247struct iwl_rx_mpdu_res_start { 1235struct il_rx_mpdu_res_start {
1248 __le16 byte_count; 1236 __le16 byte_count;
1249 __le16 reserved; 1237 __le16 reserved;
1250} __packed; 1238} __packed;
1251 1239
1252
1253/****************************************************************************** 1240/******************************************************************************
1254 * (5) 1241 * (5)
1255 * Tx Commands & Responses: 1242 * Tx Commands & Responses:
1256 * 1243 *
1257 * Driver must place each REPLY_TX command into one of the prioritized Tx 1244 * Driver must place each C_TX command into one of the prioritized Tx
1258 * queues in host DRAM, shared between driver and device (see comments for 1245 * queues in host DRAM, shared between driver and device (see comments for
1259 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode 1246 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
1260 * are preparing to transmit, the device pulls the Tx command over the PCI 1247 * are preparing to transmit, the device pulls the Tx command over the PCI
@@ -1264,18 +1251,18 @@ struct iwl_rx_mpdu_res_start {
1264 * uCode handles all timing and protocol related to control frames 1251 * uCode handles all timing and protocol related to control frames
1265 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler 1252 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1266 * handle reception of block-acks; uCode updates the host driver via 1253 * handle reception of block-acks; uCode updates the host driver via
1267 * REPLY_COMPRESSED_BA. 1254 * N_COMPRESSED_BA.
1268 * 1255 *
1269 * uCode handles retrying Tx when an ACK is expected but not received. 1256 * uCode handles retrying Tx when an ACK is expected but not received.
1270 * This includes trying lower data rates than the one requested in the Tx 1257 * This includes trying lower data rates than the one requested in the Tx
1271 * command, as set up by the REPLY_RATE_SCALE (for 3945) or 1258 * command, as set up by the C_RATE_SCALE (for 3945) or
1272 * REPLY_TX_LINK_QUALITY_CMD (4965). 1259 * C_TX_LINK_QUALITY_CMD (4965).
1273 * 1260 *
1274 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. 1261 * Driver sets up transmit power for various rates via C_TX_PWR_TBL.
1275 * This command must be executed after every RXON command, before Tx can occur. 1262 * This command must be executed after every RXON command, before Tx can occur.
1276 *****************************************************************************/ 1263 *****************************************************************************/
1277 1264
1278/* REPLY_TX Tx flags field */ 1265/* C_TX Tx flags field */
1279 1266
1280/* 1267/*
1281 * 1: Use Request-To-Send protocol before this frame. 1268 * 1: Use Request-To-Send protocol before this frame.
@@ -1296,8 +1283,8 @@ struct iwl_rx_mpdu_res_start {
1296#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3) 1283#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
1297 1284
1298/* For 4965 devices: 1285/* For 4965 devices:
1299 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). 1286 * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD).
1300 * Tx command's initial_rate_index indicates first rate to try; 1287 * Tx command's initial_rate_idx indicates first rate to try;
1301 * uCode walks through table for additional Tx attempts. 1288 * uCode walks through table for additional Tx attempts.
1302 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. 1289 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
1303 * This rate will be used for all Tx attempts; it will not be scaled. */ 1290 * This rate will be used for all Tx attempts; it will not be scaled. */
@@ -1322,7 +1309,7 @@ struct iwl_rx_mpdu_res_start {
1322/* 1: uCode overrides sequence control field in MAC header. 1309/* 1: uCode overrides sequence control field in MAC header.
1323 * 0: Driver provides sequence control field in MAC header. 1310 * 0: Driver provides sequence control field in MAC header.
1324 * Set this for management frames, non-QOS data frames, non-unicast frames, 1311 * Set this for management frames, non-QOS data frames, non-unicast frames,
1325 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ 1312 * and also in Tx command embedded in C_SCAN for active scans. */
1326#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13) 1313#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
1327 1314
1328/* 1: This frame is non-last MPDU; more fragments are coming. 1315/* 1: This frame is non-last MPDU; more fragments are coming.
@@ -1349,7 +1336,6 @@ struct iwl_rx_mpdu_res_start {
1349/* HCCA-AP - disable duration overwriting. */ 1336/* HCCA-AP - disable duration overwriting. */
1350#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25) 1337#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
1351 1338
1352
1353/* 1339/*
1354 * TX command security control 1340 * TX command security control
1355 */ 1341 */
@@ -1369,10 +1355,10 @@ struct iwl_rx_mpdu_res_start {
1369#define TKIP_ICV_LEN 4 1355#define TKIP_ICV_LEN 4
1370 1356
1371/* 1357/*
1372 * REPLY_TX = 0x1c (command) 1358 * C_TX = 0x1c (command)
1373 */ 1359 */
1374 1360
1375struct iwl3945_tx_cmd { 1361struct il3945_tx_cmd {
1376 /* 1362 /*
1377 * MPDU byte count: 1363 * MPDU byte count:
1378 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, 1364 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1434,9 +1420,9 @@ struct iwl3945_tx_cmd {
1434} __packed; 1420} __packed;
1435 1421
1436/* 1422/*
1437 * REPLY_TX = 0x1c (response) 1423 * C_TX = 0x1c (response)
1438 */ 1424 */
1439struct iwl3945_tx_resp { 1425struct il3945_tx_resp {
1440 u8 failure_rts; 1426 u8 failure_rts;
1441 u8 failure_frame; 1427 u8 failure_frame;
1442 u8 bt_kill_count; 1428 u8 bt_kill_count;
@@ -1445,19 +1431,18 @@ struct iwl3945_tx_resp {
1445 __le32 status; /* TX status */ 1431 __le32 status; /* TX status */
1446} __packed; 1432} __packed;
1447 1433
1448
1449/* 1434/*
1450 * 4965 uCode updates these Tx attempt count values in host DRAM. 1435 * 4965 uCode updates these Tx attempt count values in host DRAM.
1451 * Used for managing Tx retries when expecting block-acks. 1436 * Used for managing Tx retries when expecting block-acks.
1452 * Driver should set these fields to 0. 1437 * Driver should set these fields to 0.
1453 */ 1438 */
1454struct iwl_dram_scratch { 1439struct il_dram_scratch {
1455 u8 try_cnt; /* Tx attempts */ 1440 u8 try_cnt; /* Tx attempts */
1456 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ 1441 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1457 __le16 reserved; 1442 __le16 reserved;
1458} __packed; 1443} __packed;
1459 1444
1460struct iwl_tx_cmd { 1445struct il_tx_cmd {
1461 /* 1446 /*
1462 * MPDU byte count: 1447 * MPDU byte count:
1463 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, 1448 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1481,7 +1466,7 @@ struct iwl_tx_cmd {
1481 1466
1482 /* uCode may modify this field of the Tx command (in host DRAM!). 1467 /* uCode may modify this field of the Tx command (in host DRAM!).
1483 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ 1468 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
1484 struct iwl_dram_scratch scratch; 1469 struct il_dram_scratch scratch;
1485 1470
1486 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ 1471 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
1487 __le32 rate_n_flags; /* RATE_MCS_* */ 1472 __le32 rate_n_flags; /* RATE_MCS_* */
@@ -1493,13 +1478,13 @@ struct iwl_tx_cmd {
1493 u8 sec_ctl; /* TX_CMD_SEC_* */ 1478 u8 sec_ctl; /* TX_CMD_SEC_* */
1494 1479
1495 /* 1480 /*
1496 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial 1481 * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
1497 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for 1482 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
1498 * data frames, this field may be used to selectively reduce initial 1483 * data frames, this field may be used to selectively reduce initial
1499 * rate (via non-0 value) for special frames (e.g. management), while 1484 * rate (via non-0 value) for special frames (e.g. management), while
1500 * still supporting rate scaling for all frames. 1485 * still supporting rate scaling for all frames.
1501 */ 1486 */
1502 u8 initial_rate_index; 1487 u8 initial_rate_idx;
1503 u8 reserved; 1488 u8 reserved;
1504 u8 key[16]; 1489 u8 key[16];
1505 __le16 next_frame_flags; 1490 __le16 next_frame_flags;
@@ -1628,12 +1613,12 @@ enum {
1628}; 1613};
1629 1614
1630enum { 1615enum {
1631 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ 1616 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
1632 TX_STATUS_DELAY_MSK = 0x00000040, 1617 TX_STATUS_DELAY_MSK = 0x00000040,
1633 TX_STATUS_ABORT_MSK = 0x00000080, 1618 TX_STATUS_ABORT_MSK = 0x00000080,
1634 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ 1619 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
1635 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ 1620 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
1636 TX_RESERVED = 0x00780000, /* bits 19:22 */ 1621 TX_RESERVED = 0x00780000, /* bits 19:22 */
1637 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ 1622 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
1638 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ 1623 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1639}; 1624};
@@ -1671,7 +1656,7 @@ enum {
1671#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 1656#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
1672 1657
1673/* 1658/*
1674 * REPLY_TX = 0x1c (response) 1659 * C_TX = 0x1c (response)
1675 * 1660 *
1676 * This response may be in one of two slightly different formats, indicated 1661 * This response may be in one of two slightly different formats, indicated
1677 * by the frame_count field: 1662 * by the frame_count field:
@@ -1697,7 +1682,7 @@ struct agg_tx_status {
1697 __le16 sequence; 1682 __le16 sequence;
1698} __packed; 1683} __packed;
1699 1684
1700struct iwl4965_tx_resp { 1685struct il4965_tx_resp {
1701 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1686 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1702 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ 1687 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1703 u8 failure_rts; /* # failures due to unsuccessful RTS */ 1688 u8 failure_rts; /* # failures due to unsuccessful RTS */
@@ -1730,16 +1715,16 @@ struct iwl4965_tx_resp {
1730 */ 1715 */
1731 union { 1716 union {
1732 __le32 status; 1717 __le32 status;
1733 struct agg_tx_status agg_status[0]; /* for each agg frame */ 1718 struct agg_tx_status agg_status[0]; /* for each agg frame */
1734 } u; 1719 } u;
1735} __packed; 1720} __packed;
1736 1721
1737/* 1722/*
1738 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) 1723 * N_COMPRESSED_BA = 0xc5 (response only, not a command)
1739 * 1724 *
1740 * Reports Block-Acknowledge from recipient station 1725 * Reports Block-Acknowledge from recipient station
1741 */ 1726 */
1742struct iwl_compressed_ba_resp { 1727struct il_compressed_ba_resp {
1743 __le32 sta_addr_lo32; 1728 __le32 sta_addr_lo32;
1744 __le16 sta_addr_hi16; 1729 __le16 sta_addr_hi16;
1745 __le16 reserved; 1730 __le16 reserved;
@@ -1754,30 +1739,29 @@ struct iwl_compressed_ba_resp {
1754} __packed; 1739} __packed;
1755 1740
1756/* 1741/*
1757 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) 1742 * C_TX_PWR_TBL = 0x97 (command, has simple generic response)
1758 * 1743 *
1759 * See details under "TXPOWER" in iwl-4965-hw.h. 1744 * See details under "TXPOWER" in 4965.h.
1760 */ 1745 */
1761 1746
1762struct iwl3945_txpowertable_cmd { 1747struct il3945_txpowertable_cmd {
1763 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ 1748 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1764 u8 reserved; 1749 u8 reserved;
1765 __le16 channel; 1750 __le16 channel;
1766 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 1751 struct il3945_power_per_rate power[IL_MAX_RATES];
1767} __packed; 1752} __packed;
1768 1753
1769struct iwl4965_txpowertable_cmd { 1754struct il4965_txpowertable_cmd {
1770 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ 1755 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1771 u8 reserved; 1756 u8 reserved;
1772 __le16 channel; 1757 __le16 channel;
1773 struct iwl4965_tx_power_db tx_power; 1758 struct il4965_tx_power_db tx_power;
1774} __packed; 1759} __packed;
1775 1760
1776
1777/** 1761/**
1778 * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response 1762 * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response
1779 * 1763 *
1780 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response) 1764 * C_RATE_SCALE = 0x47 (command, has simple generic response)
1781 * 1765 *
1782 * NOTE: The table of rates passed to the uCode via the 1766 * NOTE: The table of rates passed to the uCode via the
1783 * RATE_SCALE command sets up the corresponding order of 1767 * RATE_SCALE command sets up the corresponding order of
@@ -1786,22 +1770,21 @@ struct iwl4965_txpowertable_cmd {
1786 * 1770 *
1787 * For example, if you set 9MB (PLCP 0x0f) as the first 1771 * For example, if you set 9MB (PLCP 0x0f) as the first
1788 * rate in the rate table, the bit mask for that rate 1772 * rate in the rate table, the bit mask for that rate
1789 * when passed through ofdm_basic_rates on the REPLY_RXON 1773 * when passed through ofdm_basic_rates on the C_RXON
1790 * command would be bit 0 (1 << 0) 1774 * command would be bit 0 (1 << 0)
1791 */ 1775 */
1792struct iwl3945_rate_scaling_info { 1776struct il3945_rate_scaling_info {
1793 __le16 rate_n_flags; 1777 __le16 rate_n_flags;
1794 u8 try_cnt; 1778 u8 try_cnt;
1795 u8 next_rate_index; 1779 u8 next_rate_idx;
1796} __packed; 1780} __packed;
1797 1781
1798struct iwl3945_rate_scaling_cmd { 1782struct il3945_rate_scaling_cmd {
1799 u8 table_id; 1783 u8 table_id;
1800 u8 reserved[3]; 1784 u8 reserved[3];
1801 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; 1785 struct il3945_rate_scaling_info table[IL_MAX_RATES];
1802} __packed; 1786} __packed;
1803 1787
1804
1805/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ 1788/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
1806#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) 1789#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
1807 1790
@@ -1816,28 +1799,27 @@ struct iwl3945_rate_scaling_cmd {
1816#define LINK_QUAL_ANT_B_MSK (1 << 1) 1799#define LINK_QUAL_ANT_B_MSK (1 << 1)
1817#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) 1800#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
1818 1801
1819
1820/** 1802/**
1821 * struct iwl_link_qual_general_params 1803 * struct il_link_qual_general_params
1822 * 1804 *
1823 * Used in REPLY_TX_LINK_QUALITY_CMD 1805 * Used in C_TX_LINK_QUALITY_CMD
1824 */ 1806 */
1825struct iwl_link_qual_general_params { 1807struct il_link_qual_general_params {
1826 u8 flags; 1808 u8 flags;
1827 1809
1828 /* No entries at or above this (driver chosen) index contain MIMO */ 1810 /* No entries at or above this (driver chosen) idx contain MIMO */
1829 u8 mimo_delimiter; 1811 u8 mimo_delimiter;
1830 1812
1831 /* Best single antenna to use for single stream (legacy, SISO). */ 1813 /* Best single antenna to use for single stream (legacy, SISO). */
1832 u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ 1814 u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
1833 1815
1834 /* Best antennas to use for MIMO (unused for 4965, assumes both). */ 1816 /* Best antennas to use for MIMO (unused for 4965, assumes both). */
1835 u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ 1817 u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
1836 1818
1837 /* 1819 /*
1838 * If driver needs to use different initial rates for different 1820 * If driver needs to use different initial rates for different
1839 * EDCA QOS access categories (as implemented by tx fifos 0-3), 1821 * EDCA QOS access categories (as implemented by tx fifos 0-3),
1840 * this table will set that up, by indicating the indexes in the 1822 * this table will set that up, by indicating the idxes in the
1841 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start. 1823 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
1842 * Otherwise, driver should set all entries to 0. 1824 * Otherwise, driver should set all entries to 0.
1843 * 1825 *
@@ -1845,10 +1827,10 @@ struct iwl_link_qual_general_params {
1845 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice 1827 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
1846 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. 1828 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
1847 */ 1829 */
1848 u8 start_rate_index[LINK_QUAL_AC_NUM]; 1830 u8 start_rate_idx[LINK_QUAL_AC_NUM];
1849} __packed; 1831} __packed;
1850 1832
1851#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ 1833#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
1852#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) 1834#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
1853#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) 1835#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
1854 1836
@@ -1861,11 +1843,11 @@ struct iwl_link_qual_general_params {
1861#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) 1843#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
1862 1844
1863/** 1845/**
1864 * struct iwl_link_qual_agg_params 1846 * struct il_link_qual_agg_params
1865 * 1847 *
1866 * Used in REPLY_TX_LINK_QUALITY_CMD 1848 * Used in C_TX_LINK_QUALITY_CMD
1867 */ 1849 */
1868struct iwl_link_qual_agg_params { 1850struct il_link_qual_agg_params {
1869 1851
1870 /* 1852 /*
1871 *Maximum number of uSec in aggregation. 1853 *Maximum number of uSec in aggregation.
@@ -1892,9 +1874,9 @@ struct iwl_link_qual_agg_params {
1892} __packed; 1874} __packed;
1893 1875
1894/* 1876/*
1895 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) 1877 * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1896 * 1878 *
1897 * For 4965 devices only; 3945 uses REPLY_RATE_SCALE. 1879 * For 4965 devices only; 3945 uses C_RATE_SCALE.
1898 * 1880 *
1899 * Each station in the 4965 device's internal station table has its own table 1881 * Each station in the 4965 device's internal station table has its own table
1900 * of 16 1882 * of 16
@@ -1903,13 +1885,13 @@ struct iwl_link_qual_agg_params {
1903 * one station. 1885 * one station.
1904 * 1886 *
1905 * NOTE: Station must already be in 4965 device's station table. 1887 * NOTE: Station must already be in 4965 device's station table.
1906 * Use REPLY_ADD_STA. 1888 * Use C_ADD_STA.
1907 * 1889 *
1908 * The rate scaling procedures described below work well. Of course, other 1890 * The rate scaling procedures described below work well. Of course, other
1909 * procedures are possible, and may work better for particular environments. 1891 * procedures are possible, and may work better for particular environments.
1910 * 1892 *
1911 * 1893 *
1912 * FILLING THE RATE TABLE 1894 * FILLING THE RATE TBL
1913 * 1895 *
1914 * Given a particular initial rate and mode, as determined by the rate 1896 * Given a particular initial rate and mode, as determined by the rate
1915 * scaling algorithm described below, the Linux driver uses the following 1897 * scaling algorithm described below, the Linux driver uses the following
@@ -1948,13 +1930,13 @@ struct iwl_link_qual_agg_params {
1948 * speculative mode as the new current active mode. 1930 * speculative mode as the new current active mode.
1949 * 1931 *
1950 * Each history set contains, separately for each possible rate, data for a 1932 * Each history set contains, separately for each possible rate, data for a
1951 * sliding window of the 62 most recent tx attempts at that rate. The data 1933 * sliding win of the 62 most recent tx attempts at that rate. The data
1952 * includes a shifting bitmap of success(1)/failure(0), and sums of successful 1934 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1953 * and attempted frames, from which the driver can additionally calculate a 1935 * and attempted frames, from which the driver can additionally calculate a
1954 * success ratio (success / attempted) and number of failures 1936 * success ratio (success / attempted) and number of failures
1955 * (attempted - success), and control the size of the window (attempted). 1937 * (attempted - success), and control the size of the win (attempted).
1956 * The driver uses the bit map to remove successes from the success sum, as 1938 * The driver uses the bit map to remove successes from the success sum, as
1957 * the oldest tx attempts fall out of the window. 1939 * the oldest tx attempts fall out of the win.
1958 * 1940 *
1959 * When the 4965 device makes multiple tx attempts for a given frame, each 1941 * When the 4965 device makes multiple tx attempts for a given frame, each
1960 * attempt might be at a different rate, and have different modulation 1942 * attempt might be at a different rate, and have different modulation
@@ -1966,7 +1948,7 @@ struct iwl_link_qual_agg_params {
1966 * 1948 *
1967 * When using block-ack (aggregation), all frames are transmitted at the same 1949 * When using block-ack (aggregation), all frames are transmitted at the same
1968 * rate, since there is no per-attempt acknowledgment from the destination 1950 * rate, since there is no per-attempt acknowledgment from the destination
1969 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in 1951 * station. The Tx response struct il_tx_resp indicates the Tx rate in
1970 * rate_n_flags field. After receiving a block-ack, the driver can update 1952 * rate_n_flags field. After receiving a block-ack, the driver can update
1971 * history for the entire block all at once. 1953 * history for the entire block all at once.
1972 * 1954 *
@@ -2016,8 +1998,8 @@ struct iwl_link_qual_agg_params {
2016 * good performance; higher rate is sure to have poorer success. 1998 * good performance; higher rate is sure to have poorer success.
2017 * 1999 *
2018 * 6) Re-evaluate the rate after each tx frame. If working with block- 2000 * 6) Re-evaluate the rate after each tx frame. If working with block-
2019 * acknowledge, history and statistics may be calculated for the entire 2001 * acknowledge, history and stats may be calculated for the entire
2020 * block (including prior history that fits within the history windows), 2002 * block (including prior history that fits within the history wins),
2021 * before re-evaluation. 2003 * before re-evaluation.
2022 * 2004 *
2023 * FINDING BEST STARTING MODULATION MODE: 2005 * FINDING BEST STARTING MODULATION MODE:
@@ -2079,22 +2061,22 @@ struct iwl_link_qual_agg_params {
2079 * legacy), and then repeat the search process. 2061 * legacy), and then repeat the search process.
2080 * 2062 *
2081 */ 2063 */
2082struct iwl_link_quality_cmd { 2064struct il_link_quality_cmd {
2083 2065
2084 /* Index of destination/recipient station in uCode's station table */ 2066 /* Index of destination/recipient station in uCode's station table */
2085 u8 sta_id; 2067 u8 sta_id;
2086 u8 reserved1; 2068 u8 reserved1;
2087 __le16 control; /* not used */ 2069 __le16 control; /* not used */
2088 struct iwl_link_qual_general_params general_params; 2070 struct il_link_qual_general_params general_params;
2089 struct iwl_link_qual_agg_params agg_params; 2071 struct il_link_qual_agg_params agg_params;
2090 2072
2091 /* 2073 /*
2092 * Rate info; when using rate-scaling, Tx command's initial_rate_index 2074 * Rate info; when using rate-scaling, Tx command's initial_rate_idx
2093 * specifies 1st Tx rate attempted, via index into this table. 2075 * specifies 1st Tx rate attempted, via idx into this table.
2094 * 4965 devices works its way through table when retrying Tx. 2076 * 4965 devices works its way through table when retrying Tx.
2095 */ 2077 */
2096 struct { 2078 struct {
2097 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ 2079 __le32 rate_n_flags; /* RATE_MCS_*, RATE_* */
2098 } rs_table[LINK_QUAL_MAX_RETRY_NUM]; 2080 } rs_table[LINK_QUAL_MAX_RETRY_NUM];
2099 __le32 reserved2; 2081 __le32 reserved2;
2100} __packed; 2082} __packed;
@@ -2117,13 +2099,13 @@ struct iwl_link_quality_cmd {
2117#define BT_MAX_KILL_DEF (0x5) 2099#define BT_MAX_KILL_DEF (0x5)
2118 2100
2119/* 2101/*
2120 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) 2102 * C_BT_CONFIG = 0x9b (command, has simple generic response)
2121 * 2103 *
2122 * 3945 and 4965 devices support hardware handshake with Bluetooth device on 2104 * 3945 and 4965 devices support hardware handshake with Bluetooth device on
2123 * same platform. Bluetooth device alerts wireless device when it will Tx; 2105 * same platform. Bluetooth device alerts wireless device when it will Tx;
2124 * wireless device can delay or kill its own Tx to accommodate. 2106 * wireless device can delay or kill its own Tx to accommodate.
2125 */ 2107 */
2126struct iwl_bt_cmd { 2108struct il_bt_cmd {
2127 u8 flags; 2109 u8 flags;
2128 u8 lead_time; 2110 u8 lead_time;
2129 u8 max_kill; 2111 u8 max_kill;
@@ -2132,7 +2114,6 @@ struct iwl_bt_cmd {
2132 __le32 kill_cts_mask; 2114 __le32 kill_cts_mask;
2133} __packed; 2115} __packed;
2134 2116
2135
2136/****************************************************************************** 2117/******************************************************************************
2137 * (6) 2118 * (6)
2138 * Spectrum Management (802.11h) Commands, Responses, Notifications: 2119 * Spectrum Management (802.11h) Commands, Responses, Notifications:
@@ -2150,18 +2131,18 @@ struct iwl_bt_cmd {
2150 RXON_FILTER_ASSOC_MSK | \ 2131 RXON_FILTER_ASSOC_MSK | \
2151 RXON_FILTER_BCON_AWARE_MSK) 2132 RXON_FILTER_BCON_AWARE_MSK)
2152 2133
2153struct iwl_measure_channel { 2134struct il_measure_channel {
2154 __le32 duration; /* measurement duration in extended beacon 2135 __le32 duration; /* measurement duration in extended beacon
2155 * format */ 2136 * format */
2156 u8 channel; /* channel to measure */ 2137 u8 channel; /* channel to measure */
2157 u8 type; /* see enum iwl_measure_type */ 2138 u8 type; /* see enum il_measure_type */
2158 __le16 reserved; 2139 __le16 reserved;
2159} __packed; 2140} __packed;
2160 2141
2161/* 2142/*
2162 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) 2143 * C_SPECTRUM_MEASUREMENT = 0x74 (command)
2163 */ 2144 */
2164struct iwl_spectrum_cmd { 2145struct il_spectrum_cmd {
2165 __le16 len; /* number of bytes starting from token */ 2146 __le16 len; /* number of bytes starting from token */
2166 u8 token; /* token id */ 2147 u8 token; /* token id */
2167 u8 id; /* measurement id -- 0 or 1 */ 2148 u8 id; /* measurement id -- 0 or 1 */
@@ -2174,13 +2155,13 @@ struct iwl_spectrum_cmd {
2174 __le32 filter_flags; /* rxon filter flags */ 2155 __le32 filter_flags; /* rxon filter flags */
2175 __le16 channel_count; /* minimum 1, maximum 10 */ 2156 __le16 channel_count; /* minimum 1, maximum 10 */
2176 __le16 reserved3; 2157 __le16 reserved3;
2177 struct iwl_measure_channel channels[10]; 2158 struct il_measure_channel channels[10];
2178} __packed; 2159} __packed;
2179 2160
2180/* 2161/*
2181 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) 2162 * C_SPECTRUM_MEASUREMENT = 0x74 (response)
2182 */ 2163 */
2183struct iwl_spectrum_resp { 2164struct il_spectrum_resp {
2184 u8 token; 2165 u8 token;
2185 u8 id; /* id of the prior command replaced, or 0xff */ 2166 u8 id; /* id of the prior command replaced, or 0xff */
2186 __le16 status; /* 0 - command will be handled 2167 __le16 status; /* 0 - command will be handled
@@ -2188,57 +2169,57 @@ struct iwl_spectrum_resp {
2188 * measurement) */ 2169 * measurement) */
2189} __packed; 2170} __packed;
2190 2171
2191enum iwl_measurement_state { 2172enum il_measurement_state {
2192 IWL_MEASUREMENT_START = 0, 2173 IL_MEASUREMENT_START = 0,
2193 IWL_MEASUREMENT_STOP = 1, 2174 IL_MEASUREMENT_STOP = 1,
2194}; 2175};
2195 2176
2196enum iwl_measurement_status { 2177enum il_measurement_status {
2197 IWL_MEASUREMENT_OK = 0, 2178 IL_MEASUREMENT_OK = 0,
2198 IWL_MEASUREMENT_CONCURRENT = 1, 2179 IL_MEASUREMENT_CONCURRENT = 1,
2199 IWL_MEASUREMENT_CSA_CONFLICT = 2, 2180 IL_MEASUREMENT_CSA_CONFLICT = 2,
2200 IWL_MEASUREMENT_TGH_CONFLICT = 3, 2181 IL_MEASUREMENT_TGH_CONFLICT = 3,
2201 /* 4-5 reserved */ 2182 /* 4-5 reserved */
2202 IWL_MEASUREMENT_STOPPED = 6, 2183 IL_MEASUREMENT_STOPPED = 6,
2203 IWL_MEASUREMENT_TIMEOUT = 7, 2184 IL_MEASUREMENT_TIMEOUT = 7,
2204 IWL_MEASUREMENT_PERIODIC_FAILED = 8, 2185 IL_MEASUREMENT_PERIODIC_FAILED = 8,
2205}; 2186};
2206 2187
2207#define NUM_ELEMENTS_IN_HISTOGRAM 8 2188#define NUM_ELEMENTS_IN_HISTOGRAM 8
2208 2189
2209struct iwl_measurement_histogram { 2190struct il_measurement_histogram {
2210 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ 2191 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
2211 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ 2192 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
2212} __packed; 2193} __packed;
2213 2194
2214/* clear channel availability counters */ 2195/* clear channel availability counters */
2215struct iwl_measurement_cca_counters { 2196struct il_measurement_cca_counters {
2216 __le32 ofdm; 2197 __le32 ofdm;
2217 __le32 cck; 2198 __le32 cck;
2218} __packed; 2199} __packed;
2219 2200
2220enum iwl_measure_type { 2201enum il_measure_type {
2221 IWL_MEASURE_BASIC = (1 << 0), 2202 IL_MEASURE_BASIC = (1 << 0),
2222 IWL_MEASURE_CHANNEL_LOAD = (1 << 1), 2203 IL_MEASURE_CHANNEL_LOAD = (1 << 1),
2223 IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), 2204 IL_MEASURE_HISTOGRAM_RPI = (1 << 2),
2224 IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3), 2205 IL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
2225 IWL_MEASURE_FRAME = (1 << 4), 2206 IL_MEASURE_FRAME = (1 << 4),
2226 /* bits 5:6 are reserved */ 2207 /* bits 5:6 are reserved */
2227 IWL_MEASURE_IDLE = (1 << 7), 2208 IL_MEASURE_IDLE = (1 << 7),
2228}; 2209};
2229 2210
2230/* 2211/*
2231 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) 2212 * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command)
2232 */ 2213 */
2233struct iwl_spectrum_notification { 2214struct il_spectrum_notification {
2234 u8 id; /* measurement id -- 0 or 1 */ 2215 u8 id; /* measurement id -- 0 or 1 */
2235 u8 token; 2216 u8 token;
2236 u8 channel_index; /* index in measurement channel list */ 2217 u8 channel_idx; /* idx in measurement channel list */
2237 u8 state; /* 0 - start, 1 - stop */ 2218 u8 state; /* 0 - start, 1 - stop */
2238 __le32 start_time; /* lower 32-bits of TSF */ 2219 __le32 start_time; /* lower 32-bits of TSF */
2239 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ 2220 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
2240 u8 channel; 2221 u8 channel;
2241 u8 type; /* see enum iwl_measurement_type */ 2222 u8 type; /* see enum il_measurement_type */
2242 u8 reserved1; 2223 u8 reserved1;
2243 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only 2224 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only
2244 * valid if applicable for measurement type requested. */ 2225 * valid if applicable for measurement type requested. */
@@ -2248,9 +2229,9 @@ struct iwl_spectrum_notification {
2248 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - 2229 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
2249 * unidentified */ 2230 * unidentified */
2250 u8 reserved2[3]; 2231 u8 reserved2[3];
2251 struct iwl_measurement_histogram histogram; 2232 struct il_measurement_histogram histogram;
2252 __le32 stop_time; /* lower 32-bits of TSF */ 2233 __le32 stop_time; /* lower 32-bits of TSF */
2253 __le32 status; /* see iwl_measurement_status */ 2234 __le32 status; /* see il_measurement_status */
2254} __packed; 2235} __packed;
2255 2236
2256/****************************************************************************** 2237/******************************************************************************
@@ -2260,10 +2241,10 @@ struct iwl_spectrum_notification {
2260 *****************************************************************************/ 2241 *****************************************************************************/
2261 2242
2262/** 2243/**
2263 * struct iwl_powertable_cmd - Power Table Command 2244 * struct il_powertable_cmd - Power Table Command
2264 * @flags: See below: 2245 * @flags: See below:
2265 * 2246 *
2266 * POWER_TABLE_CMD = 0x77 (command, has simple generic response) 2247 * C_POWER_TBL = 0x77 (command, has simple generic response)
2267 * 2248 *
2268 * PM allow: 2249 * PM allow:
2269 * bit 0 - '0' Driver not allow power management 2250 * bit 0 - '0' Driver not allow power management
@@ -2290,38 +2271,38 @@ struct iwl_spectrum_notification {
2290 * '10' force xtal sleep 2271 * '10' force xtal sleep
2291 * '11' Illegal set 2272 * '11' Illegal set
2292 * 2273 *
2293 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then 2274 * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then
2294 * ucode assume sleep over DTIM is allowed and we don't need to wake up 2275 * ucode assume sleep over DTIM is allowed and we don't need to wake up
2295 * for every DTIM. 2276 * for every DTIM.
2296 */ 2277 */
2297#define IWL_POWER_VEC_SIZE 5 2278#define IL_POWER_VEC_SIZE 5
2298 2279
2299#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) 2280#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2300#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) 2281#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2301 2282
2302struct iwl3945_powertable_cmd { 2283struct il3945_powertable_cmd {
2303 __le16 flags; 2284 __le16 flags;
2304 u8 reserved[2]; 2285 u8 reserved[2];
2305 __le32 rx_data_timeout; 2286 __le32 rx_data_timeout;
2306 __le32 tx_data_timeout; 2287 __le32 tx_data_timeout;
2307 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2288 __le32 sleep_interval[IL_POWER_VEC_SIZE];
2308} __packed; 2289} __packed;
2309 2290
2310struct iwl_powertable_cmd { 2291struct il_powertable_cmd {
2311 __le16 flags; 2292 __le16 flags;
2312 u8 keep_alive_seconds; /* 3945 reserved */ 2293 u8 keep_alive_seconds; /* 3945 reserved */
2313 u8 debug_flags; /* 3945 reserved */ 2294 u8 debug_flags; /* 3945 reserved */
2314 __le32 rx_data_timeout; 2295 __le32 rx_data_timeout;
2315 __le32 tx_data_timeout; 2296 __le32 tx_data_timeout;
2316 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2297 __le32 sleep_interval[IL_POWER_VEC_SIZE];
2317 __le32 keep_alive_beacons; 2298 __le32 keep_alive_beacons;
2318} __packed; 2299} __packed;
2319 2300
2320/* 2301/*
2321 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) 2302 * N_PM_SLEEP = 0x7A (notification only, not a command)
2322 * all devices identical. 2303 * all devices identical.
2323 */ 2304 */
2324struct iwl_sleep_notification { 2305struct il_sleep_notification {
2325 u8 pm_sleep_mode; 2306 u8 pm_sleep_mode;
2326 u8 pm_wakeup_src; 2307 u8 pm_wakeup_src;
2327 __le16 reserved; 2308 __le16 reserved;
@@ -2332,23 +2313,23 @@ struct iwl_sleep_notification {
2332 2313
2333/* Sleep states. all devices identical. */ 2314/* Sleep states. all devices identical. */
2334enum { 2315enum {
2335 IWL_PM_NO_SLEEP = 0, 2316 IL_PM_NO_SLEEP = 0,
2336 IWL_PM_SLP_MAC = 1, 2317 IL_PM_SLP_MAC = 1,
2337 IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, 2318 IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
2338 IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, 2319 IL_PM_SLP_FULL_MAC_CARD_STATE = 3,
2339 IWL_PM_SLP_PHY = 4, 2320 IL_PM_SLP_PHY = 4,
2340 IWL_PM_SLP_REPENT = 5, 2321 IL_PM_SLP_REPENT = 5,
2341 IWL_PM_WAKEUP_BY_TIMER = 6, 2322 IL_PM_WAKEUP_BY_TIMER = 6,
2342 IWL_PM_WAKEUP_BY_DRIVER = 7, 2323 IL_PM_WAKEUP_BY_DRIVER = 7,
2343 IWL_PM_WAKEUP_BY_RFKILL = 8, 2324 IL_PM_WAKEUP_BY_RFKILL = 8,
2344 /* 3 reserved */ 2325 /* 3 reserved */
2345 IWL_PM_NUM_OF_MODES = 12, 2326 IL_PM_NUM_OF_MODES = 12,
2346}; 2327};
2347 2328
2348/* 2329/*
2349 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) 2330 * N_CARD_STATE = 0xa1 (notification only, not a command)
2350 */ 2331 */
2351struct iwl_card_state_notif { 2332struct il_card_state_notif {
2352 __le32 flags; 2333 __le32 flags;
2353} __packed; 2334} __packed;
2354 2335
@@ -2357,11 +2338,11 @@ struct iwl_card_state_notif {
2357#define CT_CARD_DISABLED 0x04 2338#define CT_CARD_DISABLED 0x04
2358#define RXON_CARD_DISABLED 0x10 2339#define RXON_CARD_DISABLED 0x10
2359 2340
2360struct iwl_ct_kill_config { 2341struct il_ct_kill_config {
2361 __le32 reserved; 2342 __le32 reserved;
2362 __le32 critical_temperature_M; 2343 __le32 critical_temperature_M;
2363 __le32 critical_temperature_R; 2344 __le32 critical_temperature_R;
2364} __packed; 2345} __packed;
2365 2346
2366/****************************************************************************** 2347/******************************************************************************
2367 * (8) 2348 * (8)
@@ -2373,7 +2354,7 @@ struct iwl_ct_kill_config {
2373#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) 2354#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
2374 2355
2375/** 2356/**
2376 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table 2357 * struct il_scan_channel - entry in C_SCAN channel table
2377 * 2358 *
2378 * One for each channel in the scan list. 2359 * One for each channel in the scan list.
2379 * Each channel can independently select: 2360 * Each channel can independently select:
@@ -2383,7 +2364,7 @@ struct iwl_ct_kill_config {
2383 * quiet_plcp_th, good_CRC_th) 2364 * quiet_plcp_th, good_CRC_th)
2384 * 2365 *
2385 * To avoid uCode errors, make sure the following are true (see comments 2366 * To avoid uCode errors, make sure the following are true (see comments
2386 * under struct iwl_scan_cmd about max_out_time and quiet_time): 2367 * under struct il_scan_cmd about max_out_time and quiet_time):
2387 * 1) If using passive_dwell (i.e. passive_dwell != 0): 2368 * 1) If using passive_dwell (i.e. passive_dwell != 0):
2388 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) 2369 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
2389 * 2) quiet_time <= active_dwell 2370 * 2) quiet_time <= active_dwell
@@ -2391,7 +2372,7 @@ struct iwl_ct_kill_config {
2391 * passive_dwell < max_out_time 2372 * passive_dwell < max_out_time
2392 * active_dwell < max_out_time 2373 * active_dwell < max_out_time
2393 */ 2374 */
2394struct iwl3945_scan_channel { 2375struct il3945_scan_channel {
2395 /* 2376 /*
2396 * type is defined as: 2377 * type is defined as:
2397 * 0:0 1 = active, 0 = passive 2378 * 0:0 1 = active, 0 = passive
@@ -2400,16 +2381,16 @@ struct iwl3945_scan_channel {
2400 * 5:7 reserved 2381 * 5:7 reserved
2401 */ 2382 */
2402 u8 type; 2383 u8 type;
2403 u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */ 2384 u8 channel; /* band is selected by il3945_scan_cmd "flags" field */
2404 struct iwl3945_tx_power tpc; 2385 struct il3945_tx_power tpc;
2405 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ 2386 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2406 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ 2387 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2407} __packed; 2388} __packed;
2408 2389
2409/* set number of direct probes u8 type */ 2390/* set number of direct probes u8 type */
2410#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) 2391#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
2411 2392
2412struct iwl_scan_channel { 2393struct il_scan_channel {
2413 /* 2394 /*
2414 * type is defined as: 2395 * type is defined as:
2415 * 0:0 1 = active, 0 = passive 2396 * 0:0 1 = active, 0 = passive
@@ -2418,7 +2399,7 @@ struct iwl_scan_channel {
2418 * 21:31 reserved 2399 * 21:31 reserved
2419 */ 2400 */
2420 __le32 type; 2401 __le32 type;
2421 __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */ 2402 __le16 channel; /* band is selected by il_scan_cmd "flags" field */
2422 u8 tx_gain; /* gain for analog radio */ 2403 u8 tx_gain; /* gain for analog radio */
2423 u8 dsp_atten; /* gain for DSP */ 2404 u8 dsp_atten; /* gain for DSP */
2424 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ 2405 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
@@ -2426,17 +2407,17 @@ struct iwl_scan_channel {
2426} __packed; 2407} __packed;
2427 2408
2428/* set number of direct probes __le32 type */ 2409/* set number of direct probes __le32 type */
2429#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) 2410#define IL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
2430 2411
2431/** 2412/**
2432 * struct iwl_ssid_ie - directed scan network information element 2413 * struct il_ssid_ie - directed scan network information element
2433 * 2414 *
2434 * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in 2415 * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in
2435 * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel; 2416 * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel;
2436 * each channel may select different ssids from among the 20 (4) entries. 2417 * each channel may select different ssids from among the 20 (4) entries.
2437 * SSID IEs get transmitted in reverse order of entry. 2418 * SSID IEs get transmitted in reverse order of entry.
2438 */ 2419 */
2439struct iwl_ssid_ie { 2420struct il_ssid_ie {
2440 u8 id; 2421 u8 id;
2441 u8 len; 2422 u8 len;
2442 u8 ssid[32]; 2423 u8 ssid[32];
@@ -2445,14 +2426,14 @@ struct iwl_ssid_ie {
2445#define PROBE_OPTION_MAX_3945 4 2426#define PROBE_OPTION_MAX_3945 4
2446#define PROBE_OPTION_MAX 20 2427#define PROBE_OPTION_MAX 20
2447#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) 2428#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2448#define IWL_GOOD_CRC_TH_DISABLED 0 2429#define IL_GOOD_CRC_TH_DISABLED 0
2449#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) 2430#define IL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2450#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) 2431#define IL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2451#define IWL_MAX_SCAN_SIZE 1024 2432#define IL_MAX_SCAN_SIZE 1024
2452#define IWL_MAX_CMD_SIZE 4096 2433#define IL_MAX_CMD_SIZE 4096
2453 2434
2454/* 2435/*
2455 * REPLY_SCAN_CMD = 0x80 (command) 2436 * C_SCAN = 0x80 (command)
2456 * 2437 *
2457 * The hardware scan command is very powerful; the driver can set it up to 2438 * The hardware scan command is very powerful; the driver can set it up to
2458 * maintain (relatively) normal network traffic while doing a scan in the 2439 * maintain (relatively) normal network traffic while doing a scan in the
@@ -2501,10 +2482,10 @@ struct iwl_ssid_ie {
2501 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands. 2482 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2502 * 2483 *
2503 * To avoid uCode errors, see timing restrictions described under 2484 * To avoid uCode errors, see timing restrictions described under
2504 * struct iwl_scan_channel. 2485 * struct il_scan_channel.
2505 */ 2486 */
2506 2487
2507struct iwl3945_scan_cmd { 2488struct il3945_scan_cmd {
2508 __le16 len; 2489 __le16 len;
2509 u8 reserved0; 2490 u8 reserved0;
2510 u8 channel_count; /* # channels in channel list */ 2491 u8 channel_count; /* # channels in channel list */
@@ -2525,10 +2506,10 @@ struct iwl3945_scan_cmd {
2525 2506
2526 /* For active scans (set to all-0s for passive scans). 2507 /* For active scans (set to all-0s for passive scans).
2527 * Does not include payload. Must specify Tx rate; no rate scaling. */ 2508 * Does not include payload. Must specify Tx rate; no rate scaling. */
2528 struct iwl3945_tx_cmd tx_cmd; 2509 struct il3945_tx_cmd tx_cmd;
2529 2510
2530 /* For directed active scans (set to all-0s otherwise) */ 2511 /* For directed active scans (set to all-0s otherwise) */
2531 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945]; 2512 struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
2532 2513
2533 /* 2514 /*
2534 * Probe request frame, followed by channel list. 2515 * Probe request frame, followed by channel list.
@@ -2538,17 +2519,17 @@ struct iwl3945_scan_cmd {
2538 * Number of channels in list is specified by channel_count. 2519 * Number of channels in list is specified by channel_count.
2539 * Each channel in list is of type: 2520 * Each channel in list is of type:
2540 * 2521 *
2541 * struct iwl3945_scan_channel channels[0]; 2522 * struct il3945_scan_channel channels[0];
2542 * 2523 *
2543 * NOTE: Only one band of channels can be scanned per pass. You 2524 * NOTE: Only one band of channels can be scanned per pass. You
2544 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait 2525 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2545 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) 2526 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
2546 * before requesting another scan. 2527 * before requesting another scan.
2547 */ 2528 */
2548 u8 data[0]; 2529 u8 data[0];
2549} __packed; 2530} __packed;
2550 2531
2551struct iwl_scan_cmd { 2532struct il_scan_cmd {
2552 __le16 len; 2533 __le16 len;
2553 u8 reserved0; 2534 u8 reserved0;
2554 u8 channel_count; /* # channels in channel list */ 2535 u8 channel_count; /* # channels in channel list */
@@ -2569,10 +2550,10 @@ struct iwl_scan_cmd {
2569 2550
2570 /* For active scans (set to all-0s for passive scans). 2551 /* For active scans (set to all-0s for passive scans).
2571 * Does not include payload. Must specify Tx rate; no rate scaling. */ 2552 * Does not include payload. Must specify Tx rate; no rate scaling. */
2572 struct iwl_tx_cmd tx_cmd; 2553 struct il_tx_cmd tx_cmd;
2573 2554
2574 /* For directed active scans (set to all-0s otherwise) */ 2555 /* For directed active scans (set to all-0s otherwise) */
2575 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; 2556 struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
2576 2557
2577 /* 2558 /*
2578 * Probe request frame, followed by channel list. 2559 * Probe request frame, followed by channel list.
@@ -2582,11 +2563,11 @@ struct iwl_scan_cmd {
2582 * Number of channels in list is specified by channel_count. 2563 * Number of channels in list is specified by channel_count.
2583 * Each channel in list is of type: 2564 * Each channel in list is of type:
2584 * 2565 *
2585 * struct iwl_scan_channel channels[0]; 2566 * struct il_scan_channel channels[0];
2586 * 2567 *
2587 * NOTE: Only one band of channels can be scanned per pass. You 2568 * NOTE: Only one band of channels can be scanned per pass. You
2588 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait 2569 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2589 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) 2570 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
2590 * before requesting another scan. 2571 * before requesting another scan.
2591 */ 2572 */
2592 u8 data[0]; 2573 u8 data[0];
@@ -2598,16 +2579,16 @@ struct iwl_scan_cmd {
2598#define ABORT_STATUS 0x2 2579#define ABORT_STATUS 0x2
2599 2580
2600/* 2581/*
2601 * REPLY_SCAN_CMD = 0x80 (response) 2582 * C_SCAN = 0x80 (response)
2602 */ 2583 */
2603struct iwl_scanreq_notification { 2584struct il_scanreq_notification {
2604 __le32 status; /* 1: okay, 2: cannot fulfill request */ 2585 __le32 status; /* 1: okay, 2: cannot fulfill request */
2605} __packed; 2586} __packed;
2606 2587
2607/* 2588/*
2608 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) 2589 * N_SCAN_START = 0x82 (notification only, not a command)
2609 */ 2590 */
2610struct iwl_scanstart_notification { 2591struct il_scanstart_notification {
2611 __le32 tsf_low; 2592 __le32 tsf_low;
2612 __le32 tsf_high; 2593 __le32 tsf_high;
2613 __le32 beacon_timer; 2594 __le32 beacon_timer;
@@ -2620,30 +2601,30 @@ struct iwl_scanstart_notification {
2620#define SCAN_OWNER_STATUS 0x1 2601#define SCAN_OWNER_STATUS 0x1
2621#define MEASURE_OWNER_STATUS 0x2 2602#define MEASURE_OWNER_STATUS 0x2
2622 2603
2623#define IWL_PROBE_STATUS_OK 0 2604#define IL_PROBE_STATUS_OK 0
2624#define IWL_PROBE_STATUS_TX_FAILED BIT(0) 2605#define IL_PROBE_STATUS_TX_FAILED BIT(0)
2625/* error statuses combined with TX_FAILED */ 2606/* error statuses combined with TX_FAILED */
2626#define IWL_PROBE_STATUS_FAIL_TTL BIT(1) 2607#define IL_PROBE_STATUS_FAIL_TTL BIT(1)
2627#define IWL_PROBE_STATUS_FAIL_BT BIT(2) 2608#define IL_PROBE_STATUS_FAIL_BT BIT(2)
2628 2609
2629#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ 2610#define NUMBER_OF_STATS 1 /* first __le32 is good CRC */
2630/* 2611/*
2631 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) 2612 * N_SCAN_RESULTS = 0x83 (notification only, not a command)
2632 */ 2613 */
2633struct iwl_scanresults_notification { 2614struct il_scanresults_notification {
2634 u8 channel; 2615 u8 channel;
2635 u8 band; 2616 u8 band;
2636 u8 probe_status; 2617 u8 probe_status;
2637 u8 num_probe_not_sent; /* not enough time to send */ 2618 u8 num_probe_not_sent; /* not enough time to send */
2638 __le32 tsf_low; 2619 __le32 tsf_low;
2639 __le32 tsf_high; 2620 __le32 tsf_high;
2640 __le32 statistics[NUMBER_OF_STATISTICS]; 2621 __le32 stats[NUMBER_OF_STATS];
2641} __packed; 2622} __packed;
2642 2623
2643/* 2624/*
2644 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) 2625 * N_SCAN_COMPLETE = 0x84 (notification only, not a command)
2645 */ 2626 */
2646struct iwl_scancomplete_notification { 2627struct il_scancomplete_notification {
2647 u8 scanned_channels; 2628 u8 scanned_channels;
2648 u8 status; 2629 u8 status;
2649 u8 last_channel; 2630 u8 last_channel;
@@ -2651,50 +2632,49 @@ struct iwl_scancomplete_notification {
2651 __le32 tsf_high; 2632 __le32 tsf_high;
2652} __packed; 2633} __packed;
2653 2634
2654
2655/****************************************************************************** 2635/******************************************************************************
2656 * (9) 2636 * (9)
2657 * IBSS/AP Commands and Notifications: 2637 * IBSS/AP Commands and Notifications:
2658 * 2638 *
2659 *****************************************************************************/ 2639 *****************************************************************************/
2660 2640
2661enum iwl_ibss_manager { 2641enum il_ibss_manager {
2662 IWL_NOT_IBSS_MANAGER = 0, 2642 IL_NOT_IBSS_MANAGER = 0,
2663 IWL_IBSS_MANAGER = 1, 2643 IL_IBSS_MANAGER = 1,
2664}; 2644};
2665 2645
2666/* 2646/*
2667 * BEACON_NOTIFICATION = 0x90 (notification only, not a command) 2647 * N_BEACON = 0x90 (notification only, not a command)
2668 */ 2648 */
2669 2649
2670struct iwl3945_beacon_notif { 2650struct il3945_beacon_notif {
2671 struct iwl3945_tx_resp beacon_notify_hdr; 2651 struct il3945_tx_resp beacon_notify_hdr;
2672 __le32 low_tsf; 2652 __le32 low_tsf;
2673 __le32 high_tsf; 2653 __le32 high_tsf;
2674 __le32 ibss_mgr_status; 2654 __le32 ibss_mgr_status;
2675} __packed; 2655} __packed;
2676 2656
2677struct iwl4965_beacon_notif { 2657struct il4965_beacon_notif {
2678 struct iwl4965_tx_resp beacon_notify_hdr; 2658 struct il4965_tx_resp beacon_notify_hdr;
2679 __le32 low_tsf; 2659 __le32 low_tsf;
2680 __le32 high_tsf; 2660 __le32 high_tsf;
2681 __le32 ibss_mgr_status; 2661 __le32 ibss_mgr_status;
2682} __packed; 2662} __packed;
2683 2663
2684/* 2664/*
2685 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2665 * C_TX_BEACON= 0x91 (command, has simple generic response)
2686 */ 2666 */
2687 2667
2688struct iwl3945_tx_beacon_cmd { 2668struct il3945_tx_beacon_cmd {
2689 struct iwl3945_tx_cmd tx; 2669 struct il3945_tx_cmd tx;
2690 __le16 tim_idx; 2670 __le16 tim_idx;
2691 u8 tim_size; 2671 u8 tim_size;
2692 u8 reserved1; 2672 u8 reserved1;
2693 struct ieee80211_hdr frame[0]; /* beacon frame */ 2673 struct ieee80211_hdr frame[0]; /* beacon frame */
2694} __packed; 2674} __packed;
2695 2675
2696struct iwl_tx_beacon_cmd { 2676struct il_tx_beacon_cmd {
2697 struct iwl_tx_cmd tx; 2677 struct il_tx_cmd tx;
2698 __le16 tim_idx; 2678 __le16 tim_idx;
2699 u8 tim_size; 2679 u8 tim_size;
2700 u8 reserved1; 2680 u8 reserved1;
@@ -2707,7 +2687,7 @@ struct iwl_tx_beacon_cmd {
2707 * 2687 *
2708 *****************************************************************************/ 2688 *****************************************************************************/
2709 2689
2710#define IWL_TEMP_CONVERT 260 2690#define IL_TEMP_CONVERT 260
2711 2691
2712#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 2692#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
2713#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 2693#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
@@ -2727,9 +2707,9 @@ struct rate_histogram {
2727 } failed; 2707 } failed;
2728} __packed; 2708} __packed;
2729 2709
2730/* statistics command response */ 2710/* stats command response */
2731 2711
2732struct iwl39_statistics_rx_phy { 2712struct iwl39_stats_rx_phy {
2733 __le32 ina_cnt; 2713 __le32 ina_cnt;
2734 __le32 fina_cnt; 2714 __le32 fina_cnt;
2735 __le32 plcp_err; 2715 __le32 plcp_err;
@@ -2747,7 +2727,7 @@ struct iwl39_statistics_rx_phy {
2747 __le32 sent_cts_cnt; 2727 __le32 sent_cts_cnt;
2748} __packed; 2728} __packed;
2749 2729
2750struct iwl39_statistics_rx_non_phy { 2730struct iwl39_stats_rx_non_phy {
2751 __le32 bogus_cts; /* CTS received when not expecting CTS */ 2731 __le32 bogus_cts; /* CTS received when not expecting CTS */
2752 __le32 bogus_ack; /* ACK received when not expecting ACK */ 2732 __le32 bogus_ack; /* ACK received when not expecting ACK */
2753 __le32 non_bssid_frames; /* number of frames with BSSID that 2733 __le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2758,13 +2738,13 @@ struct iwl39_statistics_rx_non_phy {
2758 * our serving channel */ 2738 * our serving channel */
2759} __packed; 2739} __packed;
2760 2740
2761struct iwl39_statistics_rx { 2741struct iwl39_stats_rx {
2762 struct iwl39_statistics_rx_phy ofdm; 2742 struct iwl39_stats_rx_phy ofdm;
2763 struct iwl39_statistics_rx_phy cck; 2743 struct iwl39_stats_rx_phy cck;
2764 struct iwl39_statistics_rx_non_phy general; 2744 struct iwl39_stats_rx_non_phy general;
2765} __packed; 2745} __packed;
2766 2746
2767struct iwl39_statistics_tx { 2747struct iwl39_stats_tx {
2768 __le32 preamble_cnt; 2748 __le32 preamble_cnt;
2769 __le32 rx_detected_cnt; 2749 __le32 rx_detected_cnt;
2770 __le32 bt_prio_defer_cnt; 2750 __le32 bt_prio_defer_cnt;
@@ -2776,31 +2756,31 @@ struct iwl39_statistics_tx {
2776 __le32 actual_ack_cnt; 2756 __le32 actual_ack_cnt;
2777} __packed; 2757} __packed;
2778 2758
2779struct statistics_dbg { 2759struct stats_dbg {
2780 __le32 burst_check; 2760 __le32 burst_check;
2781 __le32 burst_count; 2761 __le32 burst_count;
2782 __le32 wait_for_silence_timeout_cnt; 2762 __le32 wait_for_silence_timeout_cnt;
2783 __le32 reserved[3]; 2763 __le32 reserved[3];
2784} __packed; 2764} __packed;
2785 2765
2786struct iwl39_statistics_div { 2766struct iwl39_stats_div {
2787 __le32 tx_on_a; 2767 __le32 tx_on_a;
2788 __le32 tx_on_b; 2768 __le32 tx_on_b;
2789 __le32 exec_time; 2769 __le32 exec_time;
2790 __le32 probe_time; 2770 __le32 probe_time;
2791} __packed; 2771} __packed;
2792 2772
2793struct iwl39_statistics_general { 2773struct iwl39_stats_general {
2794 __le32 temperature; 2774 __le32 temperature;
2795 struct statistics_dbg dbg; 2775 struct stats_dbg dbg;
2796 __le32 sleep_time; 2776 __le32 sleep_time;
2797 __le32 slots_out; 2777 __le32 slots_out;
2798 __le32 slots_idle; 2778 __le32 slots_idle;
2799 __le32 ttl_timestamp; 2779 __le32 ttl_timestamp;
2800 struct iwl39_statistics_div div; 2780 struct iwl39_stats_div div;
2801} __packed; 2781} __packed;
2802 2782
2803struct statistics_rx_phy { 2783struct stats_rx_phy {
2804 __le32 ina_cnt; 2784 __le32 ina_cnt;
2805 __le32 fina_cnt; 2785 __le32 fina_cnt;
2806 __le32 plcp_err; 2786 __le32 plcp_err;
@@ -2823,7 +2803,7 @@ struct statistics_rx_phy {
2823 __le32 reserved3; 2803 __le32 reserved3;
2824} __packed; 2804} __packed;
2825 2805
2826struct statistics_rx_ht_phy { 2806struct stats_rx_ht_phy {
2827 __le32 plcp_err; 2807 __le32 plcp_err;
2828 __le32 overrun_err; 2808 __le32 overrun_err;
2829 __le32 early_overrun_err; 2809 __le32 early_overrun_err;
@@ -2838,7 +2818,7 @@ struct statistics_rx_ht_phy {
2838 2818
2839#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) 2819#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
2840 2820
2841struct statistics_rx_non_phy { 2821struct stats_rx_non_phy {
2842 __le32 bogus_cts; /* CTS received when not expecting CTS */ 2822 __le32 bogus_cts; /* CTS received when not expecting CTS */
2843 __le32 bogus_ack; /* ACK received when not expecting ACK */ 2823 __le32 bogus_ack; /* ACK received when not expecting ACK */
2844 __le32 non_bssid_frames; /* number of frames with BSSID that 2824 __le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2852,15 +2832,15 @@ struct statistics_rx_non_phy {
2852 __le32 num_missed_bcon; /* number of missed beacons */ 2832 __le32 num_missed_bcon; /* number of missed beacons */
2853 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the 2833 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
2854 * ADC was in saturation */ 2834 * ADC was in saturation */
2855 __le32 ina_detection_search_time;/* total time (in 0.8us) searched 2835 __le32 ina_detection_search_time; /* total time (in 0.8us) searched
2856 * for INA */ 2836 * for INA */
2857 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ 2837 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
2858 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ 2838 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
2859 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ 2839 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
2860 __le32 interference_data_flag; /* flag for interference data 2840 __le32 interference_data_flag; /* flag for interference data
2861 * availability. 1 when data is 2841 * availability. 1 when data is
2862 * available. */ 2842 * available. */
2863 __le32 channel_load; /* counts RX Enable time in uSec */ 2843 __le32 channel_load; /* counts RX Enable time in uSec */
2864 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM 2844 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
2865 * and CCK) counter */ 2845 * and CCK) counter */
2866 __le32 beacon_rssi_a; 2846 __le32 beacon_rssi_a;
@@ -2871,28 +2851,28 @@ struct statistics_rx_non_phy {
2871 __le32 beacon_energy_c; 2851 __le32 beacon_energy_c;
2872} __packed; 2852} __packed;
2873 2853
2874struct statistics_rx { 2854struct stats_rx {
2875 struct statistics_rx_phy ofdm; 2855 struct stats_rx_phy ofdm;
2876 struct statistics_rx_phy cck; 2856 struct stats_rx_phy cck;
2877 struct statistics_rx_non_phy general; 2857 struct stats_rx_non_phy general;
2878 struct statistics_rx_ht_phy ofdm_ht; 2858 struct stats_rx_ht_phy ofdm_ht;
2879} __packed; 2859} __packed;
2880 2860
2881/** 2861/**
2882 * struct statistics_tx_power - current tx power 2862 * struct stats_tx_power - current tx power
2883 * 2863 *
2884 * @ant_a: current tx power on chain a in 1/2 dB step 2864 * @ant_a: current tx power on chain a in 1/2 dB step
2885 * @ant_b: current tx power on chain b in 1/2 dB step 2865 * @ant_b: current tx power on chain b in 1/2 dB step
2886 * @ant_c: current tx power on chain c in 1/2 dB step 2866 * @ant_c: current tx power on chain c in 1/2 dB step
2887 */ 2867 */
2888struct statistics_tx_power { 2868struct stats_tx_power {
2889 u8 ant_a; 2869 u8 ant_a;
2890 u8 ant_b; 2870 u8 ant_b;
2891 u8 ant_c; 2871 u8 ant_c;
2892 u8 reserved; 2872 u8 reserved;
2893} __packed; 2873} __packed;
2894 2874
2895struct statistics_tx_non_phy_agg { 2875struct stats_tx_non_phy_agg {
2896 __le32 ba_timeout; 2876 __le32 ba_timeout;
2897 __le32 ba_reschedule_frames; 2877 __le32 ba_reschedule_frames;
2898 __le32 scd_query_agg_frame_cnt; 2878 __le32 scd_query_agg_frame_cnt;
@@ -2905,7 +2885,7 @@ struct statistics_tx_non_phy_agg {
2905 __le32 rx_ba_rsp_cnt; 2885 __le32 rx_ba_rsp_cnt;
2906} __packed; 2886} __packed;
2907 2887
2908struct statistics_tx { 2888struct stats_tx {
2909 __le32 preamble_cnt; 2889 __le32 preamble_cnt;
2910 __le32 rx_detected_cnt; 2890 __le32 rx_detected_cnt;
2911 __le32 bt_prio_defer_cnt; 2891 __le32 bt_prio_defer_cnt;
@@ -2920,13 +2900,12 @@ struct statistics_tx {
2920 __le32 burst_abort_missing_next_frame_cnt; 2900 __le32 burst_abort_missing_next_frame_cnt;
2921 __le32 cts_timeout_collision; 2901 __le32 cts_timeout_collision;
2922 __le32 ack_or_ba_timeout_collision; 2902 __le32 ack_or_ba_timeout_collision;
2923 struct statistics_tx_non_phy_agg agg; 2903 struct stats_tx_non_phy_agg agg;
2924 2904
2925 __le32 reserved1; 2905 __le32 reserved1;
2926} __packed; 2906} __packed;
2927 2907
2928 2908struct stats_div {
2929struct statistics_div {
2930 __le32 tx_on_a; 2909 __le32 tx_on_a;
2931 __le32 tx_on_b; 2910 __le32 tx_on_b;
2932 __le32 exec_time; 2911 __le32 exec_time;
@@ -2935,14 +2914,14 @@ struct statistics_div {
2935 __le32 reserved2; 2914 __le32 reserved2;
2936} __packed; 2915} __packed;
2937 2916
2938struct statistics_general_common { 2917struct stats_general_common {
2939 __le32 temperature; /* radio temperature */ 2918 __le32 temperature; /* radio temperature */
2940 struct statistics_dbg dbg; 2919 struct stats_dbg dbg;
2941 __le32 sleep_time; 2920 __le32 sleep_time;
2942 __le32 slots_out; 2921 __le32 slots_out;
2943 __le32 slots_idle; 2922 __le32 slots_idle;
2944 __le32 ttl_timestamp; 2923 __le32 ttl_timestamp;
2945 struct statistics_div div; 2924 struct stats_div div;
2946 __le32 rx_enable_counter; 2925 __le32 rx_enable_counter;
2947 /* 2926 /*
2948 * num_of_sos_states: 2927 * num_of_sos_states:
@@ -2952,73 +2931,73 @@ struct statistics_general_common {
2952 __le32 num_of_sos_states; 2931 __le32 num_of_sos_states;
2953} __packed; 2932} __packed;
2954 2933
2955struct statistics_general { 2934struct stats_general {
2956 struct statistics_general_common common; 2935 struct stats_general_common common;
2957 __le32 reserved2; 2936 __le32 reserved2;
2958 __le32 reserved3; 2937 __le32 reserved3;
2959} __packed; 2938} __packed;
2960 2939
2961#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) 2940#define UCODE_STATS_CLEAR_MSK (0x1 << 0)
2962#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) 2941#define UCODE_STATS_FREQUENCY_MSK (0x1 << 1)
2963#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2) 2942#define UCODE_STATS_NARROW_BAND_MSK (0x1 << 2)
2964 2943
2965/* 2944/*
2966 * REPLY_STATISTICS_CMD = 0x9c, 2945 * C_STATS = 0x9c,
2967 * all devices identical. 2946 * all devices identical.
2968 * 2947 *
2969 * This command triggers an immediate response containing uCode statistics. 2948 * This command triggers an immediate response containing uCode stats.
2970 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. 2949 * The response is in the same format as N_STATS 0x9d, below.
2971 * 2950 *
2972 * If the CLEAR_STATS configuration flag is set, uCode will clear its 2951 * If the CLEAR_STATS configuration flag is set, uCode will clear its
2973 * internal copy of the statistics (counters) after issuing the response. 2952 * internal copy of the stats (counters) after issuing the response.
2974 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). 2953 * This flag does not affect N_STATSs after beacons (see below).
2975 * 2954 *
2976 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue 2955 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
2977 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag 2956 * N_STATSs after received beacons (see below). This flag
2978 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. 2957 * does not affect the response to the C_STATS 0x9c itself.
2979 */ 2958 */
2980#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */ 2959#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2981#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ 2960#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */
2982struct iwl_statistics_cmd { 2961struct il_stats_cmd {
2983 __le32 configuration_flags; /* IWL_STATS_CONF_* */ 2962 __le32 configuration_flags; /* IL_STATS_CONF_* */
2984} __packed; 2963} __packed;
2985 2964
2986/* 2965/*
2987 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) 2966 * N_STATS = 0x9d (notification only, not a command)
2988 * 2967 *
2989 * By default, uCode issues this notification after receiving a beacon 2968 * By default, uCode issues this notification after receiving a beacon
2990 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the 2969 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2991 * REPLY_STATISTICS_CMD 0x9c, above. 2970 * C_STATS 0x9c, above.
2992 * 2971 *
2993 * Statistics counters continue to increment beacon after beacon, but are 2972 * Statistics counters continue to increment beacon after beacon, but are
2994 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD 2973 * cleared when changing channels or when driver issues C_STATS
2995 * 0x9c with CLEAR_STATS bit set (see above). 2974 * 0x9c with CLEAR_STATS bit set (see above).
2996 * 2975 *
2997 * uCode also issues this notification during scans. uCode clears statistics 2976 * uCode also issues this notification during scans. uCode clears stats
2998 * appropriately so that each notification contains statistics for only the 2977 * appropriately so that each notification contains stats for only the
2999 * one channel that has just been scanned. 2978 * one channel that has just been scanned.
3000 */ 2979 */
3001#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) 2980#define STATS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
3002#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8) 2981#define STATS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
3003 2982
3004struct iwl3945_notif_statistics { 2983struct il3945_notif_stats {
3005 __le32 flag; 2984 __le32 flag;
3006 struct iwl39_statistics_rx rx; 2985 struct iwl39_stats_rx rx;
3007 struct iwl39_statistics_tx tx; 2986 struct iwl39_stats_tx tx;
3008 struct iwl39_statistics_general general; 2987 struct iwl39_stats_general general;
3009} __packed; 2988} __packed;
3010 2989
3011struct iwl_notif_statistics { 2990struct il_notif_stats {
3012 __le32 flag; 2991 __le32 flag;
3013 struct statistics_rx rx; 2992 struct stats_rx rx;
3014 struct statistics_tx tx; 2993 struct stats_tx tx;
3015 struct statistics_general general; 2994 struct stats_general general;
3016} __packed; 2995} __packed;
3017 2996
3018/* 2997/*
3019 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) 2998 * N_MISSED_BEACONS = 0xa2 (notification only, not a command)
3020 * 2999 *
3021 * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed 3000 * uCode send N_MISSED_BEACONS to driver when detect beacon missed
3022 * in regardless of how many missed beacons, which mean when driver receive the 3001 * in regardless of how many missed beacons, which mean when driver receive the
3023 * notification, inside the command, it can find all the beacons information 3002 * notification, inside the command, it can find all the beacons information
3024 * which include number of total missed beacons, number of consecutive missed 3003 * which include number of total missed beacons, number of consecutive missed
@@ -3035,18 +3014,17 @@ struct iwl_notif_statistics {
3035 * 3014 *
3036 */ 3015 */
3037 3016
3038#define IWL_MISSED_BEACON_THRESHOLD_MIN (1) 3017#define IL_MISSED_BEACON_THRESHOLD_MIN (1)
3039#define IWL_MISSED_BEACON_THRESHOLD_DEF (5) 3018#define IL_MISSED_BEACON_THRESHOLD_DEF (5)
3040#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF 3019#define IL_MISSED_BEACON_THRESHOLD_MAX IL_MISSED_BEACON_THRESHOLD_DEF
3041 3020
3042struct iwl_missed_beacon_notif { 3021struct il_missed_beacon_notif {
3043 __le32 consecutive_missed_beacons; 3022 __le32 consecutive_missed_beacons;
3044 __le32 total_missed_becons; 3023 __le32 total_missed_becons;
3045 __le32 num_expected_beacons; 3024 __le32 num_expected_beacons;
3046 __le32 num_recvd_beacons; 3025 __le32 num_recvd_beacons;
3047} __packed; 3026} __packed;
3048 3027
3049
3050/****************************************************************************** 3028/******************************************************************************
3051 * (11) 3029 * (11)
3052 * Rx Calibration Commands: 3030 * Rx Calibration Commands:
@@ -3062,7 +3040,7 @@ struct iwl_missed_beacon_notif {
3062 *****************************************************************************/ 3040 *****************************************************************************/
3063 3041
3064/** 3042/**
3065 * SENSITIVITY_CMD = 0xa8 (command, has simple generic response) 3043 * C_SENSITIVITY = 0xa8 (command, has simple generic response)
3066 * 3044 *
3067 * This command sets up the Rx signal detector for a sensitivity level that 3045 * This command sets up the Rx signal detector for a sensitivity level that
3068 * is high enough to lock onto all signals within the associated network, 3046 * is high enough to lock onto all signals within the associated network,
@@ -3076,12 +3054,12 @@ struct iwl_missed_beacon_notif {
3076 * time listening, not transmitting). Driver must adjust sensitivity so that 3054 * time listening, not transmitting). Driver must adjust sensitivity so that
3077 * the ratio of actual false alarms to actual Rx time falls within this range. 3055 * the ratio of actual false alarms to actual Rx time falls within this range.
3078 * 3056 *
3079 * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each 3057 * While associated, uCode delivers N_STATSs after each
3080 * received beacon. These provide information to the driver to analyze the 3058 * received beacon. These provide information to the driver to analyze the
3081 * sensitivity. Don't analyze statistics that come in from scanning, or any 3059 * sensitivity. Don't analyze stats that come in from scanning, or any
3082 * other non-associated-network source. Pertinent statistics include: 3060 * other non-associated-network source. Pertinent stats include:
3083 * 3061 *
3084 * From "general" statistics (struct statistics_rx_non_phy): 3062 * From "general" stats (struct stats_rx_non_phy):
3085 * 3063 *
3086 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level) 3064 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
3087 * Measure of energy of desired signal. Used for establishing a level 3065 * Measure of energy of desired signal. Used for establishing a level
@@ -3094,7 +3072,7 @@ struct iwl_missed_beacon_notif {
3094 * uSecs of actual Rx time during beacon period (varies according to 3072 * uSecs of actual Rx time during beacon period (varies according to
3095 * how much time was spent transmitting). 3073 * how much time was spent transmitting).
3096 * 3074 *
3097 * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately: 3075 * From "cck" and "ofdm" stats (struct stats_rx_phy), separately:
3098 * 3076 *
3099 * false_alarm_cnt 3077 * false_alarm_cnt
3100 * Signal locks abandoned early (before phy-level header). 3078 * Signal locks abandoned early (before phy-level header).
@@ -3111,15 +3089,15 @@ struct iwl_missed_beacon_notif {
3111 * 3089 *
3112 * Total number of false alarms = false_alarms + plcp_errs 3090 * Total number of false alarms = false_alarms + plcp_errs
3113 * 3091 *
3114 * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd 3092 * For OFDM, adjust the following table entries in struct il_sensitivity_cmd
3115 * (notice that the start points for OFDM are at or close to settings for 3093 * (notice that the start points for OFDM are at or close to settings for
3116 * maximum sensitivity): 3094 * maximum sensitivity):
3117 * 3095 *
3118 * START / MIN / MAX 3096 * START / MIN / MAX
3119 * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120 3097 * HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX 90 / 85 / 120
3120 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210 3098 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX 170 / 170 / 210
3121 * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140 3099 * HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX 105 / 105 / 140
3122 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270 3100 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX 220 / 220 / 270
3123 * 3101 *
3124 * If actual rate of OFDM false alarms (+ plcp_errors) is too high 3102 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
3125 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity 3103 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
@@ -3152,30 +3130,30 @@ struct iwl_missed_beacon_notif {
3152 * Reset this to 0 at the first beacon period that falls within the 3130 * Reset this to 0 at the first beacon period that falls within the
3153 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx). 3131 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
3154 * 3132 *
3155 * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd 3133 * Then, adjust the following CCK table entries in struct il_sensitivity_cmd
3156 * (notice that the start points for CCK are at maximum sensitivity): 3134 * (notice that the start points for CCK are at maximum sensitivity):
3157 * 3135 *
3158 * START / MIN / MAX 3136 * START / MIN / MAX
3159 * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200 3137 * HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX 125 / 125 / 200
3160 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400 3138 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX 200 / 200 / 400
3161 * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100 3139 * HD_MIN_ENERGY_CCK_DET_IDX 100 / 0 / 100
3162 * 3140 *
3163 * If actual rate of CCK false alarms (+ plcp_errors) is too high 3141 * If actual rate of CCK false alarms (+ plcp_errors) is too high
3164 * (greater than 50 for each 204.8 msecs listening), method for reducing 3142 * (greater than 50 for each 204.8 msecs listening), method for reducing
3165 * sensitivity is: 3143 * sensitivity is:
3166 * 3144 *
3167 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, 3145 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
3168 * up to max 400. 3146 * up to max 400.
3169 * 3147 *
3170 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160, 3148 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160,
3171 * sensitivity has been reduced a significant amount; bring it up to 3149 * sensitivity has been reduced a significant amount; bring it up to
3172 * a moderate 161. Otherwise, *add* 3, up to max 200. 3150 * a moderate 161. Otherwise, *add* 3, up to max 200.
3173 * 3151 *
3174 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160, 3152 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160,
3175 * sensitivity has been reduced only a moderate or small amount; 3153 * sensitivity has been reduced only a moderate or small amount;
3176 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX, 3154 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX,
3177 * down to min 0. Otherwise (if gain has been significantly reduced), 3155 * down to min 0. Otherwise (if gain has been significantly reduced),
3178 * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value. 3156 * don't change the HD_MIN_ENERGY_CCK_DET_IDX value.
3179 * 3157 *
3180 * b) Save a snapshot of the "silence reference". 3158 * b) Save a snapshot of the "silence reference".
3181 * 3159 *
@@ -3191,13 +3169,13 @@ struct iwl_missed_beacon_notif {
3191 * 3169 *
3192 * Method for increasing sensitivity: 3170 * Method for increasing sensitivity:
3193 * 3171 *
3194 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX, 3172 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX,
3195 * down to min 125. 3173 * down to min 125.
3196 * 3174 *
3197 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, 3175 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
3198 * down to min 200. 3176 * down to min 200.
3199 * 3177 *
3200 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100. 3178 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100.
3201 * 3179 *
3202 * If actual rate of CCK false alarms (+ plcp_errors) is within good range 3180 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
3203 * (between 5 and 50 for each 204.8 msecs listening): 3181 * (between 5 and 50 for each 204.8 msecs listening):
@@ -3206,57 +3184,56 @@ struct iwl_missed_beacon_notif {
3206 * 3184 *
3207 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors), 3185 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
3208 * give some extra margin to energy threshold by *subtracting* 8 3186 * give some extra margin to energy threshold by *subtracting* 8
3209 * from value in HD_MIN_ENERGY_CCK_DET_INDEX. 3187 * from value in HD_MIN_ENERGY_CCK_DET_IDX.
3210 * 3188 *
3211 * For all cases (too few, too many, good range), make sure that the CCK 3189 * For all cases (too few, too many, good range), make sure that the CCK
3212 * detection threshold (energy) is below the energy level for robust 3190 * detection threshold (energy) is below the energy level for robust
3213 * detection over the past 10 beacon periods, the "Max cck energy". 3191 * detection over the past 10 beacon periods, the "Max cck energy".
3214 * Lower values mean higher energy; this means making sure that the value 3192 * Lower values mean higher energy; this means making sure that the value
3215 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". 3193 * in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy".
3216 * 3194 *
3217 */ 3195 */
3218 3196
3219/* 3197/*
3220 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd) 3198 * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd)
3221 */ 3199 */
3222#define HD_TABLE_SIZE (11) /* number of entries */ 3200#define HD_TBL_SIZE (11) /* number of entries */
3223#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ 3201#define HD_MIN_ENERGY_CCK_DET_IDX (0) /* table idxes */
3224#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) 3202#define HD_MIN_ENERGY_OFDM_DET_IDX (1)
3225#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) 3203#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX (2)
3226#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) 3204#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX (3)
3227#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) 3205#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX (4)
3228#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) 3206#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX (5)
3229#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) 3207#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX (6)
3230#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) 3208#define HD_BARKER_CORR_TH_ADD_MIN_IDX (7)
3231#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) 3209#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX (8)
3232#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) 3210#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX (9)
3233#define HD_OFDM_ENERGY_TH_IN_INDEX (10) 3211#define HD_OFDM_ENERGY_TH_IN_IDX (10)
3234 3212
3235/* Control field in struct iwl_sensitivity_cmd */ 3213/* Control field in struct il_sensitivity_cmd */
3236#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) 3214#define C_SENSITIVITY_CONTROL_DEFAULT_TBL cpu_to_le16(0)
3237#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) 3215#define C_SENSITIVITY_CONTROL_WORK_TBL cpu_to_le16(1)
3238 3216
3239/** 3217/**
3240 * struct iwl_sensitivity_cmd 3218 * struct il_sensitivity_cmd
3241 * @control: (1) updates working table, (0) updates default table 3219 * @control: (1) updates working table, (0) updates default table
3242 * @table: energy threshold values, use HD_* as index into table 3220 * @table: energy threshold values, use HD_* as idx into table
3243 * 3221 *
3244 * Always use "1" in "control" to update uCode's working table and DSP. 3222 * Always use "1" in "control" to update uCode's working table and DSP.
3245 */ 3223 */
3246struct iwl_sensitivity_cmd { 3224struct il_sensitivity_cmd {
3247 __le16 control; /* always use "1" */ 3225 __le16 control; /* always use "1" */
3248 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ 3226 __le16 table[HD_TBL_SIZE]; /* use HD_* as idx */
3249} __packed; 3227} __packed;
3250 3228
3251
3252/** 3229/**
3253 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) 3230 * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response)
3254 * 3231 *
3255 * This command sets the relative gains of 4965 device's 3 radio receiver chains. 3232 * This command sets the relative gains of 4965 device's 3 radio receiver chains.
3256 * 3233 *
3257 * After the first association, driver should accumulate signal and noise 3234 * After the first association, driver should accumulate signal and noise
3258 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20 3235 * stats from the N_STATSs that follow the first 20
3259 * beacons from the associated network (don't collect statistics that come 3236 * beacons from the associated network (don't collect stats that come
3260 * in from scanning, or any other non-network source). 3237 * in from scanning, or any other non-network source).
3261 * 3238 *
3262 * DISCONNECTED ANTENNA: 3239 * DISCONNECTED ANTENNA:
@@ -3264,7 +3241,7 @@ struct iwl_sensitivity_cmd {
3264 * Driver should determine which antennas are actually connected, by comparing 3241 * Driver should determine which antennas are actually connected, by comparing
3265 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the 3242 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3266 * following values over 20 beacons, one accumulator for each of the chains 3243 * following values over 20 beacons, one accumulator for each of the chains
3267 * a/b/c, from struct statistics_rx_non_phy: 3244 * a/b/c, from struct stats_rx_non_phy:
3268 * 3245 *
3269 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB) 3246 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3270 * 3247 *
@@ -3283,7 +3260,7 @@ struct iwl_sensitivity_cmd {
3283 * to antennas, see above) for gain, by comparing the average signal levels 3260 * to antennas, see above) for gain, by comparing the average signal levels
3284 * detected during the silence after each beacon (background noise). 3261 * detected during the silence after each beacon (background noise).
3285 * Accumulate (add) the following values over 20 beacons, one accumulator for 3262 * Accumulate (add) the following values over 20 beacons, one accumulator for
3286 * each of the chains a/b/c, from struct statistics_rx_non_phy: 3263 * each of the chains a/b/c, from struct stats_rx_non_phy:
3287 * 3264 *
3288 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB) 3265 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3289 * 3266 *
@@ -3294,7 +3271,7 @@ struct iwl_sensitivity_cmd {
3294 * (accum_noise[i] - accum_noise[reference]) / 30 3271 * (accum_noise[i] - accum_noise[reference]) / 30
3295 * 3272 *
3296 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB. 3273 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3297 * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the 3274 * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the
3298 * driver should limit the difference results to a range of 0-3 (0-4.5 dB), 3275 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3299 * and set bit 2 to indicate "reduce gain". The value for the reference 3276 * and set bit 2 to indicate "reduce gain". The value for the reference
3300 * (weakest) chain should be "0". 3277 * (weakest) chain should be "0".
@@ -3306,24 +3283,24 @@ struct iwl_sensitivity_cmd {
3306 3283
3307/* Phy calibration command for series */ 3284/* Phy calibration command for series */
3308/* The default calibrate table size if not specified by firmware */ 3285/* The default calibrate table size if not specified by firmware */
3309#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 3286#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
3310enum { 3287enum {
3311 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, 3288 IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
3312 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19, 3289 IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
3313}; 3290};
3314 3291
3315#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253) 3292#define IL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3316 3293
3317struct iwl_calib_hdr { 3294struct il_calib_hdr {
3318 u8 op_code; 3295 u8 op_code;
3319 u8 first_group; 3296 u8 first_group;
3320 u8 groups_num; 3297 u8 groups_num;
3321 u8 data_valid; 3298 u8 data_valid;
3322} __packed; 3299} __packed;
3323 3300
3324/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ 3301/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
3325struct iwl_calib_diff_gain_cmd { 3302struct il_calib_diff_gain_cmd {
3326 struct iwl_calib_hdr hdr; 3303 struct il_calib_hdr hdr;
3327 s8 diff_gain_a; /* see above */ 3304 s8 diff_gain_a; /* see above */
3328 s8 diff_gain_b; 3305 s8 diff_gain_b;
3329 s8 diff_gain_c; 3306 s8 diff_gain_c;
@@ -3338,12 +3315,12 @@ struct iwl_calib_diff_gain_cmd {
3338 3315
3339/* 3316/*
3340 * LEDs Command & Response 3317 * LEDs Command & Response
3341 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) 3318 * C_LEDS = 0x48 (command, has simple generic response)
3342 * 3319 *
3343 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), 3320 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
3344 * this command turns it on or off, or sets up a periodic blinking cycle. 3321 * this command turns it on or off, or sets up a periodic blinking cycle.
3345 */ 3322 */
3346struct iwl_led_cmd { 3323struct il_led_cmd {
3347 __le32 interval; /* "interval" in uSec */ 3324 __le32 interval; /* "interval" in uSec */
3348 u8 id; /* 1: Activity, 2: Link, 3: Tech */ 3325 u8 id; /* 1: Activity, 2: Link, 3: Tech */
3349 u8 off; /* # intervals off while blinking; 3326 u8 off; /* # intervals off while blinking;
@@ -3353,14 +3330,15 @@ struct iwl_led_cmd {
3353 u8 reserved; 3330 u8 reserved;
3354} __packed; 3331} __packed;
3355 3332
3356
3357/****************************************************************************** 3333/******************************************************************************
3358 * (13) 3334 * (13)
3359 * Union of all expected notifications/responses: 3335 * Union of all expected notifications/responses:
3360 * 3336 *
3361 *****************************************************************************/ 3337 *****************************************************************************/
3362 3338
3363struct iwl_rx_packet { 3339#define IL_RX_FRAME_SIZE_MSK 0x00003fff
3340
3341struct il_rx_pkt {
3364 /* 3342 /*
3365 * The first 4 bytes of the RX frame header contain both the RX frame 3343 * The first 4 bytes of the RX frame header contain both the RX frame
3366 * size and some flags. 3344 * size and some flags.
@@ -3372,27 +3350,27 @@ struct iwl_rx_packet {
3372 * 13-00: RX frame size 3350 * 13-00: RX frame size
3373 */ 3351 */
3374 __le32 len_n_flags; 3352 __le32 len_n_flags;
3375 struct iwl_cmd_header hdr; 3353 struct il_cmd_header hdr;
3376 union { 3354 union {
3377 struct iwl3945_rx_frame rx_frame; 3355 struct il3945_rx_frame rx_frame;
3378 struct iwl3945_tx_resp tx_resp; 3356 struct il3945_tx_resp tx_resp;
3379 struct iwl3945_beacon_notif beacon_status; 3357 struct il3945_beacon_notif beacon_status;
3380 3358
3381 struct iwl_alive_resp alive_frame; 3359 struct il_alive_resp alive_frame;
3382 struct iwl_spectrum_notification spectrum_notif; 3360 struct il_spectrum_notification spectrum_notif;
3383 struct iwl_csa_notification csa_notif; 3361 struct il_csa_notification csa_notif;
3384 struct iwl_error_resp err_resp; 3362 struct il_error_resp err_resp;
3385 struct iwl_card_state_notif card_state_notif; 3363 struct il_card_state_notif card_state_notif;
3386 struct iwl_add_sta_resp add_sta; 3364 struct il_add_sta_resp add_sta;
3387 struct iwl_rem_sta_resp rem_sta; 3365 struct il_rem_sta_resp rem_sta;
3388 struct iwl_sleep_notification sleep_notif; 3366 struct il_sleep_notification sleep_notif;
3389 struct iwl_spectrum_resp spectrum; 3367 struct il_spectrum_resp spectrum;
3390 struct iwl_notif_statistics stats; 3368 struct il_notif_stats stats;
3391 struct iwl_compressed_ba_resp compressed_ba; 3369 struct il_compressed_ba_resp compressed_ba;
3392 struct iwl_missed_beacon_notif missed_beacon; 3370 struct il_missed_beacon_notif missed_beacon;
3393 __le32 status; 3371 __le32 status;
3394 u8 raw[0]; 3372 u8 raw[0];
3395 } u; 3373 } u;
3396} __packed; 3374} __packed;
3397 3375
3398#endif /* __iwl_legacy_commands_h__ */ 3376#endif /* __il_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
new file mode 100644
index 000000000000..881ba043770a
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -0,0 +1,5706 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/types.h>
35#include <linux/lockdep.h>
36#include <linux/init.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/skbuff.h>
41#include <net/mac80211.h>
42
43#include "common.h"
44
45const char *
46il_get_cmd_string(u8 cmd)
47{
48 switch (cmd) {
49 IL_CMD(N_ALIVE);
50 IL_CMD(N_ERROR);
51 IL_CMD(C_RXON);
52 IL_CMD(C_RXON_ASSOC);
53 IL_CMD(C_QOS_PARAM);
54 IL_CMD(C_RXON_TIMING);
55 IL_CMD(C_ADD_STA);
56 IL_CMD(C_REM_STA);
57 IL_CMD(C_WEPKEY);
58 IL_CMD(N_3945_RX);
59 IL_CMD(C_TX);
60 IL_CMD(C_RATE_SCALE);
61 IL_CMD(C_LEDS);
62 IL_CMD(C_TX_LINK_QUALITY_CMD);
63 IL_CMD(C_CHANNEL_SWITCH);
64 IL_CMD(N_CHANNEL_SWITCH);
65 IL_CMD(C_SPECTRUM_MEASUREMENT);
66 IL_CMD(N_SPECTRUM_MEASUREMENT);
67 IL_CMD(C_POWER_TBL);
68 IL_CMD(N_PM_SLEEP);
69 IL_CMD(N_PM_DEBUG_STATS);
70 IL_CMD(C_SCAN);
71 IL_CMD(C_SCAN_ABORT);
72 IL_CMD(N_SCAN_START);
73 IL_CMD(N_SCAN_RESULTS);
74 IL_CMD(N_SCAN_COMPLETE);
75 IL_CMD(N_BEACON);
76 IL_CMD(C_TX_BEACON);
77 IL_CMD(C_TX_PWR_TBL);
78 IL_CMD(C_BT_CONFIG);
79 IL_CMD(C_STATS);
80 IL_CMD(N_STATS);
81 IL_CMD(N_CARD_STATE);
82 IL_CMD(N_MISSED_BEACONS);
83 IL_CMD(C_CT_KILL_CONFIG);
84 IL_CMD(C_SENSITIVITY);
85 IL_CMD(C_PHY_CALIBRATION);
86 IL_CMD(N_RX_PHY);
87 IL_CMD(N_RX_MPDU);
88 IL_CMD(N_RX);
89 IL_CMD(N_COMPRESSED_BA);
90 default:
91 return "UNKNOWN";
92
93 }
94}
95EXPORT_SYMBOL(il_get_cmd_string);
96
97#define HOST_COMPLETE_TIMEOUT (HZ / 2)
98
99static void
100il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
101 struct il_rx_pkt *pkt)
102{
103 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
104 IL_ERR("Bad return from %s (0x%08X)\n",
105 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
106 return;
107 }
108#ifdef CONFIG_IWLEGACY_DEBUG
109 switch (cmd->hdr.cmd) {
110 case C_TX_LINK_QUALITY_CMD:
111 case C_SENSITIVITY:
112 D_HC_DUMP("back from %s (0x%08X)\n",
113 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
114 break;
115 default:
116 D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
117 pkt->hdr.flags);
118 }
119#endif
120}
121
122static int
123il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
124{
125 int ret;
126
127 BUG_ON(!(cmd->flags & CMD_ASYNC));
128
129 /* An asynchronous command can not expect an SKB to be set. */
130 BUG_ON(cmd->flags & CMD_WANT_SKB);
131
132 /* Assign a generic callback if one is not provided */
133 if (!cmd->callback)
134 cmd->callback = il_generic_cmd_callback;
135
136 if (test_bit(S_EXIT_PENDING, &il->status))
137 return -EBUSY;
138
139 ret = il_enqueue_hcmd(il, cmd);
140 if (ret < 0) {
141 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
142 il_get_cmd_string(cmd->id), ret);
143 return ret;
144 }
145 return 0;
146}
147
148int
149il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
150{
151 int cmd_idx;
152 int ret;
153
154 lockdep_assert_held(&il->mutex);
155
156 BUG_ON(cmd->flags & CMD_ASYNC);
157
158 /* A synchronous command can not have a callback set. */
159 BUG_ON(cmd->callback);
160
161 D_INFO("Attempting to send sync command %s\n",
162 il_get_cmd_string(cmd->id));
163
164 set_bit(S_HCMD_ACTIVE, &il->status);
165 D_INFO("Setting HCMD_ACTIVE for command %s\n",
166 il_get_cmd_string(cmd->id));
167
168 cmd_idx = il_enqueue_hcmd(il, cmd);
169 if (cmd_idx < 0) {
170 ret = cmd_idx;
171 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
172 il_get_cmd_string(cmd->id), ret);
173 goto out;
174 }
175
176 ret = wait_event_timeout(il->wait_command_queue,
177 !test_bit(S_HCMD_ACTIVE, &il->status),
178 HOST_COMPLETE_TIMEOUT);
179 if (!ret) {
180 if (test_bit(S_HCMD_ACTIVE, &il->status)) {
181 IL_ERR("Error sending %s: time out after %dms.\n",
182 il_get_cmd_string(cmd->id),
183 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
184
185 clear_bit(S_HCMD_ACTIVE, &il->status);
186 D_INFO("Clearing HCMD_ACTIVE for command %s\n",
187 il_get_cmd_string(cmd->id));
188 ret = -ETIMEDOUT;
189 goto cancel;
190 }
191 }
192
193 if (test_bit(S_RF_KILL_HW, &il->status)) {
194 IL_ERR("Command %s aborted: RF KILL Switch\n",
195 il_get_cmd_string(cmd->id));
196 ret = -ECANCELED;
197 goto fail;
198 }
199 if (test_bit(S_FW_ERROR, &il->status)) {
200 IL_ERR("Command %s failed: FW Error\n",
201 il_get_cmd_string(cmd->id));
202 ret = -EIO;
203 goto fail;
204 }
205 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
206 IL_ERR("Error: Response NULL in '%s'\n",
207 il_get_cmd_string(cmd->id));
208 ret = -EIO;
209 goto cancel;
210 }
211
212 ret = 0;
213 goto out;
214
215cancel:
216 if (cmd->flags & CMD_WANT_SKB) {
217 /*
218 * Cancel the CMD_WANT_SKB flag for the cmd in the
219 * TX cmd queue. Otherwise in case the cmd comes
220 * in later, it will possibly set an invalid
221 * address (cmd->meta.source).
222 */
223 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
224 }
225fail:
226 if (cmd->reply_page) {
227 il_free_pages(il, cmd->reply_page);
228 cmd->reply_page = 0;
229 }
230out:
231 return ret;
232}
233EXPORT_SYMBOL(il_send_cmd_sync);
234
235int
236il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
237{
238 if (cmd->flags & CMD_ASYNC)
239 return il_send_cmd_async(il, cmd);
240
241 return il_send_cmd_sync(il, cmd);
242}
243EXPORT_SYMBOL(il_send_cmd);
244
245int
246il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
247{
248 struct il_host_cmd cmd = {
249 .id = id,
250 .len = len,
251 .data = data,
252 };
253
254 return il_send_cmd_sync(il, &cmd);
255}
256EXPORT_SYMBOL(il_send_cmd_pdu);
257
258int
259il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
260 void (*callback) (struct il_priv *il,
261 struct il_device_cmd *cmd,
262 struct il_rx_pkt *pkt))
263{
264 struct il_host_cmd cmd = {
265 .id = id,
266 .len = len,
267 .data = data,
268 };
269
270 cmd.flags |= CMD_ASYNC;
271 cmd.callback = callback;
272
273 return il_send_cmd_async(il, &cmd);
274}
275EXPORT_SYMBOL(il_send_cmd_pdu_async);
276
277/* default: IL_LED_BLINK(0) using blinking idx table */
278static int led_mode;
279module_param(led_mode, int, S_IRUGO);
280MODULE_PARM_DESC(led_mode,
281 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
282
283/* Throughput OFF time(ms) ON time (ms)
284 * >300 25 25
285 * >200 to 300 40 40
286 * >100 to 200 55 55
287 * >70 to 100 65 65
288 * >50 to 70 75 75
289 * >20 to 50 85 85
290 * >10 to 20 95 95
291 * >5 to 10 110 110
292 * >1 to 5 130 130
293 * >0 to 1 167 167
294 * <=0 SOLID ON
295 */
296static const struct ieee80211_tpt_blink il_blink[] = {
297 {.throughput = 0, .blink_time = 334},
298 {.throughput = 1 * 1024 - 1, .blink_time = 260},
299 {.throughput = 5 * 1024 - 1, .blink_time = 220},
300 {.throughput = 10 * 1024 - 1, .blink_time = 190},
301 {.throughput = 20 * 1024 - 1, .blink_time = 170},
302 {.throughput = 50 * 1024 - 1, .blink_time = 150},
303 {.throughput = 70 * 1024 - 1, .blink_time = 130},
304 {.throughput = 100 * 1024 - 1, .blink_time = 110},
305 {.throughput = 200 * 1024 - 1, .blink_time = 80},
306 {.throughput = 300 * 1024 - 1, .blink_time = 50},
307};
308
309/*
310 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
311 * Led blink rate analysis showed an average deviation of 0% on 3945,
312 * 5% on 4965 HW.
313 * Need to compensate on the led on/off time per HW according to the deviation
314 * to achieve the desired led frequency
315 * The calculation is: (100-averageDeviation)/100 * blinkTime
316 * For code efficiency the calculation will be:
317 * compensation = (100 - averageDeviation) * 64 / 100
318 * NewBlinkTime = (compensation * BlinkTime) / 64
319 */
320static inline u8
321il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
322{
323 if (!compensation) {
324 IL_ERR("undefined blink compensation: "
325 "use pre-defined blinking time\n");
326 return time;
327 }
328
329 return (u8) ((time * compensation) >> 6);
330}
331
332/* Set led pattern command */
333static int
334il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
335{
336 struct il_led_cmd led_cmd = {
337 .id = IL_LED_LINK,
338 .interval = IL_DEF_LED_INTRVL
339 };
340 int ret;
341
342 if (!test_bit(S_READY, &il->status))
343 return -EBUSY;
344
345 if (il->blink_on == on && il->blink_off == off)
346 return 0;
347
348 if (off == 0) {
349 /* led is SOLID_ON */
350 on = IL_LED_SOLID;
351 }
352
353 D_LED("Led blink time compensation=%u\n",
354 il->cfg->base_params->led_compensation);
355 led_cmd.on =
356 il_blink_compensation(il, on,
357 il->cfg->base_params->led_compensation);
358 led_cmd.off =
359 il_blink_compensation(il, off,
360 il->cfg->base_params->led_compensation);
361
362 ret = il->cfg->ops->led->cmd(il, &led_cmd);
363 if (!ret) {
364 il->blink_on = on;
365 il->blink_off = off;
366 }
367 return ret;
368}
369
370static void
371il_led_brightness_set(struct led_classdev *led_cdev,
372 enum led_brightness brightness)
373{
374 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
375 unsigned long on = 0;
376
377 if (brightness > 0)
378 on = IL_LED_SOLID;
379
380 il_led_cmd(il, on, 0);
381}
382
383static int
384il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
385 unsigned long *delay_off)
386{
387 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
388
389 return il_led_cmd(il, *delay_on, *delay_off);
390}
391
392void
393il_leds_init(struct il_priv *il)
394{
395 int mode = led_mode;
396 int ret;
397
398 if (mode == IL_LED_DEFAULT)
399 mode = il->cfg->led_mode;
400
401 il->led.name =
402 kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
403 il->led.brightness_set = il_led_brightness_set;
404 il->led.blink_set = il_led_blink_set;
405 il->led.max_brightness = 1;
406
407 switch (mode) {
408 case IL_LED_DEFAULT:
409 WARN_ON(1);
410 break;
411 case IL_LED_BLINK:
412 il->led.default_trigger =
413 ieee80211_create_tpt_led_trigger(il->hw,
414 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
415 il_blink,
416 ARRAY_SIZE(il_blink));
417 break;
418 case IL_LED_RF_STATE:
419 il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
420 break;
421 }
422
423 ret = led_classdev_register(&il->pci_dev->dev, &il->led);
424 if (ret) {
425 kfree(il->led.name);
426 return;
427 }
428
429 il->led_registered = true;
430}
431EXPORT_SYMBOL(il_leds_init);
432
433void
434il_leds_exit(struct il_priv *il)
435{
436 if (!il->led_registered)
437 return;
438
439 led_classdev_unregister(&il->led);
440 kfree(il->led.name);
441}
442EXPORT_SYMBOL(il_leds_exit);
443
444/************************** EEPROM BANDS ****************************
445 *
446 * The il_eeprom_band definitions below provide the mapping from the
447 * EEPROM contents to the specific channel number supported for each
448 * band.
449 *
450 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
451 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
452 * The specific geography and calibration information for that channel
453 * is contained in the eeprom map itself.
454 *
455 * During init, we copy the eeprom information and channel map
456 * information into il->channel_info_24/52 and il->channel_map_24/52
457 *
458 * channel_map_24/52 provides the idx in the channel_info array for a
459 * given channel. We have to have two separate maps as there is channel
460 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
461 * band_2
462 *
463 * A value of 0xff stored in the channel_map indicates that the channel
464 * is not supported by the hardware at all.
465 *
466 * A value of 0xfe in the channel_map indicates that the channel is not
467 * valid for Tx with the current hardware. This means that
468 * while the system can tune and receive on a given channel, it may not
469 * be able to associate or transmit any frames on that
470 * channel. There is no corresponding channel information for that
471 * entry.
472 *
473 *********************************************************************/
474
475/* 2.4 GHz */
476const u8 il_eeprom_band_1[14] = {
477 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
478};
479
480/* 5.2 GHz bands */
481static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */
482 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
483};
484
485static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */
486 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
487};
488
489static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */
490 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
491};
492
493static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */
494 145, 149, 153, 157, 161, 165
495};
496
497static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */
498 1, 2, 3, 4, 5, 6, 7
499};
500
501static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */
502 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
503};
504
505/******************************************************************************
506 *
507 * EEPROM related functions
508 *
509******************************************************************************/
510
511static int
512il_eeprom_verify_signature(struct il_priv *il)
513{
514 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
515 int ret = 0;
516
517 D_EEPROM("EEPROM signature=0x%08x\n", gp);
518 switch (gp) {
519 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
520 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
521 break;
522 default:
523 IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
524 ret = -ENOENT;
525 break;
526 }
527 return ret;
528}
529
530const u8 *
531il_eeprom_query_addr(const struct il_priv *il, size_t offset)
532{
533 BUG_ON(offset >= il->cfg->base_params->eeprom_size);
534 return &il->eeprom[offset];
535}
536EXPORT_SYMBOL(il_eeprom_query_addr);
537
538u16
539il_eeprom_query16(const struct il_priv *il, size_t offset)
540{
541 if (!il->eeprom)
542 return 0;
543 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
544}
545EXPORT_SYMBOL(il_eeprom_query16);
546
547/**
548 * il_eeprom_init - read EEPROM contents
549 *
550 * Load the EEPROM contents from adapter into il->eeprom
551 *
552 * NOTE: This routine uses the non-debug IO access functions.
553 */
554int
555il_eeprom_init(struct il_priv *il)
556{
557 __le16 *e;
558 u32 gp = _il_rd(il, CSR_EEPROM_GP);
559 int sz;
560 int ret;
561 u16 addr;
562
563 /* allocate eeprom */
564 sz = il->cfg->base_params->eeprom_size;
565 D_EEPROM("NVM size = %d\n", sz);
566 il->eeprom = kzalloc(sz, GFP_KERNEL);
567 if (!il->eeprom) {
568 ret = -ENOMEM;
569 goto alloc_err;
570 }
571 e = (__le16 *) il->eeprom;
572
573 il->cfg->ops->lib->apm_ops.init(il);
574
575 ret = il_eeprom_verify_signature(il);
576 if (ret < 0) {
577 IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
578 ret = -ENOENT;
579 goto err;
580 }
581
582 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
583 ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
584 if (ret < 0) {
585 IL_ERR("Failed to acquire EEPROM semaphore.\n");
586 ret = -ENOENT;
587 goto err;
588 }
589
590 /* eeprom is an array of 16bit values */
591 for (addr = 0; addr < sz; addr += sizeof(u16)) {
592 u32 r;
593
594 _il_wr(il, CSR_EEPROM_REG,
595 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
596
597 ret =
598 _il_poll_bit(il, CSR_EEPROM_REG,
599 CSR_EEPROM_REG_READ_VALID_MSK,
600 CSR_EEPROM_REG_READ_VALID_MSK,
601 IL_EEPROM_ACCESS_TIMEOUT);
602 if (ret < 0) {
603 IL_ERR("Time out reading EEPROM[%d]\n", addr);
604 goto done;
605 }
606 r = _il_rd(il, CSR_EEPROM_REG);
607 e[addr / 2] = cpu_to_le16(r >> 16);
608 }
609
610 D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
611 il_eeprom_query16(il, EEPROM_VERSION));
612
613 ret = 0;
614done:
615 il->cfg->ops->lib->eeprom_ops.release_semaphore(il);
616
617err:
618 if (ret)
619 il_eeprom_free(il);
620 /* Reset chip to save power until we load uCode during "up". */
621 il_apm_stop(il);
622alloc_err:
623 return ret;
624}
625EXPORT_SYMBOL(il_eeprom_init);
626
627void
628il_eeprom_free(struct il_priv *il)
629{
630 kfree(il->eeprom);
631 il->eeprom = NULL;
632}
633EXPORT_SYMBOL(il_eeprom_free);
634
635static void
636il_init_band_reference(const struct il_priv *il, int eep_band,
637 int *eeprom_ch_count,
638 const struct il_eeprom_channel **eeprom_ch_info,
639 const u8 **eeprom_ch_idx)
640{
641 u32 offset =
642 il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1];
643 switch (eep_band) {
644 case 1: /* 2.4GHz band */
645 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
646 *eeprom_ch_info =
647 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
648 offset);
649 *eeprom_ch_idx = il_eeprom_band_1;
650 break;
651 case 2: /* 4.9GHz band */
652 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
653 *eeprom_ch_info =
654 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
655 offset);
656 *eeprom_ch_idx = il_eeprom_band_2;
657 break;
658 case 3: /* 5.2GHz band */
659 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
660 *eeprom_ch_info =
661 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
662 offset);
663 *eeprom_ch_idx = il_eeprom_band_3;
664 break;
665 case 4: /* 5.5GHz band */
666 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
667 *eeprom_ch_info =
668 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
669 offset);
670 *eeprom_ch_idx = il_eeprom_band_4;
671 break;
672 case 5: /* 5.7GHz band */
673 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
674 *eeprom_ch_info =
675 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
676 offset);
677 *eeprom_ch_idx = il_eeprom_band_5;
678 break;
679 case 6: /* 2.4GHz ht40 channels */
680 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
681 *eeprom_ch_info =
682 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
683 offset);
684 *eeprom_ch_idx = il_eeprom_band_6;
685 break;
686 case 7: /* 5 GHz ht40 channels */
687 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
688 *eeprom_ch_info =
689 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
690 offset);
691 *eeprom_ch_idx = il_eeprom_band_7;
692 break;
693 default:
694 BUG();
695 }
696}
697
698#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
699 ? # x " " : "")
700/**
701 * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
702 *
703 * Does not set up a command, or touch hardware.
704 */
705static int
706il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
707 const struct il_eeprom_channel *eeprom_ch,
708 u8 clear_ht40_extension_channel)
709{
710 struct il_channel_info *ch_info;
711
712 ch_info =
713 (struct il_channel_info *)il_get_channel_info(il, band, channel);
714
715 if (!il_is_channel_valid(ch_info))
716 return -1;
717
718 D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
719 " Ad-Hoc %ssupported\n", ch_info->channel,
720 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
721 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
722 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
723 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
724 eeprom_ch->max_power_avg,
725 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
726 !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");
727
728 ch_info->ht40_eeprom = *eeprom_ch;
729 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
730 ch_info->ht40_flags = eeprom_ch->flags;
731 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
732 ch_info->ht40_extension_channel &=
733 ~clear_ht40_extension_channel;
734
735 return 0;
736}
737
738#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
739 ? # x " " : "")
740
741/**
742 * il_init_channel_map - Set up driver's info for all possible channels
743 */
744int
745il_init_channel_map(struct il_priv *il)
746{
747 int eeprom_ch_count = 0;
748 const u8 *eeprom_ch_idx = NULL;
749 const struct il_eeprom_channel *eeprom_ch_info = NULL;
750 int band, ch;
751 struct il_channel_info *ch_info;
752
753 if (il->channel_count) {
754 D_EEPROM("Channel map already initialized.\n");
755 return 0;
756 }
757
758 D_EEPROM("Initializing regulatory info from EEPROM\n");
759
760 il->channel_count =
761 ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
762 ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
763 ARRAY_SIZE(il_eeprom_band_5);
764
765 D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
766
767 il->channel_info =
768 kzalloc(sizeof(struct il_channel_info) * il->channel_count,
769 GFP_KERNEL);
770 if (!il->channel_info) {
771 IL_ERR("Could not allocate channel_info\n");
772 il->channel_count = 0;
773 return -ENOMEM;
774 }
775
776 ch_info = il->channel_info;
777
778 /* Loop through the 5 EEPROM bands adding them in order to the
779 * channel map we maintain (that contains additional information than
780 * what just in the EEPROM) */
781 for (band = 1; band <= 5; band++) {
782
783 il_init_band_reference(il, band, &eeprom_ch_count,
784 &eeprom_ch_info, &eeprom_ch_idx);
785
786 /* Loop through each band adding each of the channels */
787 for (ch = 0; ch < eeprom_ch_count; ch++) {
788 ch_info->channel = eeprom_ch_idx[ch];
789 ch_info->band =
790 (band ==
791 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
792
793 /* permanently store EEPROM's channel regulatory flags
794 * and max power in channel info database. */
795 ch_info->eeprom = eeprom_ch_info[ch];
796
797 /* Copy the run-time flags so they are there even on
798 * invalid channels */
799 ch_info->flags = eeprom_ch_info[ch].flags;
800 /* First write that ht40 is not enabled, and then enable
801 * one by one */
802 ch_info->ht40_extension_channel =
803 IEEE80211_CHAN_NO_HT40;
804
805 if (!(il_is_channel_valid(ch_info))) {
806 D_EEPROM("Ch. %d Flags %x [%sGHz] - "
807 "No traffic\n", ch_info->channel,
808 ch_info->flags,
809 il_is_channel_a_band(ch_info) ? "5.2" :
810 "2.4");
811 ch_info++;
812 continue;
813 }
814
815 /* Initialize regulatory-based run-time data */
816 ch_info->max_power_avg = ch_info->curr_txpow =
817 eeprom_ch_info[ch].max_power_avg;
818 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
819 ch_info->min_power = 0;
820
821 D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
822 " Ad-Hoc %ssupported\n", ch_info->channel,
823 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
824 CHECK_AND_PRINT_I(VALID),
825 CHECK_AND_PRINT_I(IBSS),
826 CHECK_AND_PRINT_I(ACTIVE),
827 CHECK_AND_PRINT_I(RADAR),
828 CHECK_AND_PRINT_I(WIDE),
829 CHECK_AND_PRINT_I(DFS),
830 eeprom_ch_info[ch].flags,
831 eeprom_ch_info[ch].max_power_avg,
832 ((eeprom_ch_info[ch].
833 flags & EEPROM_CHANNEL_IBSS) &&
834 !(eeprom_ch_info[ch].
835 flags & EEPROM_CHANNEL_RADAR)) ? "" :
836 "not ");
837
838 ch_info++;
839 }
840 }
841
842 /* Check if we do have HT40 channels */
843 if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
844 EEPROM_REGULATORY_BAND_NO_HT40 &&
845 il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
846 EEPROM_REGULATORY_BAND_NO_HT40)
847 return 0;
848
849 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
850 for (band = 6; band <= 7; band++) {
851 enum ieee80211_band ieeeband;
852
853 il_init_band_reference(il, band, &eeprom_ch_count,
854 &eeprom_ch_info, &eeprom_ch_idx);
855
856 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
857 ieeeband =
858 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
859
860 /* Loop through each band adding each of the channels */
861 for (ch = 0; ch < eeprom_ch_count; ch++) {
862 /* Set up driver's info for lower half */
863 il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
864 &eeprom_ch_info[ch],
865 IEEE80211_CHAN_NO_HT40PLUS);
866
867 /* Set up driver's info for upper half */
868 il_mod_ht40_chan_info(il, ieeeband,
869 eeprom_ch_idx[ch] + 4,
870 &eeprom_ch_info[ch],
871 IEEE80211_CHAN_NO_HT40MINUS);
872 }
873 }
874
875 return 0;
876}
877EXPORT_SYMBOL(il_init_channel_map);
878
879/*
880 * il_free_channel_map - undo allocations in il_init_channel_map
881 */
882void
883il_free_channel_map(struct il_priv *il)
884{
885 kfree(il->channel_info);
886 il->channel_count = 0;
887}
888EXPORT_SYMBOL(il_free_channel_map);
889
890/**
891 * il_get_channel_info - Find driver's ilate channel info
892 *
893 * Based on band and channel number.
894 */
895const struct il_channel_info *
896il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
897 u16 channel)
898{
899 int i;
900
901 switch (band) {
902 case IEEE80211_BAND_5GHZ:
903 for (i = 14; i < il->channel_count; i++) {
904 if (il->channel_info[i].channel == channel)
905 return &il->channel_info[i];
906 }
907 break;
908 case IEEE80211_BAND_2GHZ:
909 if (channel >= 1 && channel <= 14)
910 return &il->channel_info[channel - 1];
911 break;
912 default:
913 BUG();
914 }
915
916 return NULL;
917}
918EXPORT_SYMBOL(il_get_channel_info);
919
920/*
921 * Setting power level allows the card to go to sleep when not busy.
922 *
923 * We calculate a sleep command based on the required latency, which
924 * we get from mac80211. In order to handle thermal throttling, we can
925 * also use pre-defined power levels.
926 */
927
928/*
929 * This defines the old power levels. They are still used by default
930 * (level 1) and for thermal throttle (levels 3 through 5)
931 */
932
933struct il_power_vec_entry {
934 struct il_powertable_cmd cmd;
935 u8 no_dtim; /* number of skip dtim */
936};
937
938static void
939il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
940{
941 memset(cmd, 0, sizeof(*cmd));
942
943 if (il->power_data.pci_pm)
944 cmd->flags |= IL_POWER_PCI_PM_MSK;
945
946 D_POWER("Sleep command for CAM\n");
947}
948
949static int
950il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
951{
952 D_POWER("Sending power/sleep command\n");
953 D_POWER("Flags value = 0x%08X\n", cmd->flags);
954 D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
955 D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
956 D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
957 le32_to_cpu(cmd->sleep_interval[0]),
958 le32_to_cpu(cmd->sleep_interval[1]),
959 le32_to_cpu(cmd->sleep_interval[2]),
960 le32_to_cpu(cmd->sleep_interval[3]),
961 le32_to_cpu(cmd->sleep_interval[4]));
962
963 return il_send_cmd_pdu(il, C_POWER_TBL,
964 sizeof(struct il_powertable_cmd), cmd);
965}
966
967int
968il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
969{
970 int ret;
971 bool update_chains;
972
973 lockdep_assert_held(&il->mutex);
974
975 /* Don't update the RX chain when chain noise calibration is running */
976 update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
977 il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;
978
979 if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
980 return 0;
981
982 if (!il_is_ready_rf(il))
983 return -EIO;
984
985 /* scan complete use sleep_power_next, need to be updated */
986 memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
987 if (test_bit(S_SCANNING, &il->status) && !force) {
988 D_INFO("Defer power set mode while scanning\n");
989 return 0;
990 }
991
992 if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
993 set_bit(S_POWER_PMI, &il->status);
994
995 ret = il_set_power(il, cmd);
996 if (!ret) {
997 if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
998 clear_bit(S_POWER_PMI, &il->status);
999
1000 if (il->cfg->ops->lib->update_chain_flags && update_chains)
1001 il->cfg->ops->lib->update_chain_flags(il);
1002 else if (il->cfg->ops->lib->update_chain_flags)
1003 D_POWER("Cannot update the power, chain noise "
1004 "calibration running: %d\n",
1005 il->chain_noise_data.state);
1006
1007 memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
1008 } else
1009 IL_ERR("set power fail, ret = %d", ret);
1010
1011 return ret;
1012}
1013
1014int
1015il_power_update_mode(struct il_priv *il, bool force)
1016{
1017 struct il_powertable_cmd cmd;
1018
1019 il_power_sleep_cam_cmd(il, &cmd);
1020 return il_power_set_mode(il, &cmd, force);
1021}
1022EXPORT_SYMBOL(il_power_update_mode);
1023
1024/* initialize to default */
1025void
1026il_power_initialize(struct il_priv *il)
1027{
1028 u16 lctl = il_pcie_link_ctl(il);
1029
1030 il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
1031
1032 il->power_data.debug_sleep_level_override = -1;
1033
1034 memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
1035}
1036EXPORT_SYMBOL(il_power_initialize);
1037
1038/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
1039 * sending probe req. This should be set long enough to hear probe responses
1040 * from more than one AP. */
1041#define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
1042#define IL_ACTIVE_DWELL_TIME_52 (20)
1043
1044#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
1045#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
1046
1047/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
1048 * Must be set longer than active dwell time.
1049 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
1050#define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
1051#define IL_PASSIVE_DWELL_TIME_52 (10)
1052#define IL_PASSIVE_DWELL_BASE (100)
1053#define IL_CHANNEL_TUNE_TIME 5
1054
1055static int
1056il_send_scan_abort(struct il_priv *il)
1057{
1058 int ret;
1059 struct il_rx_pkt *pkt;
1060 struct il_host_cmd cmd = {
1061 .id = C_SCAN_ABORT,
1062 .flags = CMD_WANT_SKB,
1063 };
1064
1065 /* Exit instantly with error when device is not ready
1066 * to receive scan abort command or it does not perform
1067 * hardware scan currently */
1068 if (!test_bit(S_READY, &il->status) ||
1069 !test_bit(S_GEO_CONFIGURED, &il->status) ||
1070 !test_bit(S_SCAN_HW, &il->status) ||
1071 test_bit(S_FW_ERROR, &il->status) ||
1072 test_bit(S_EXIT_PENDING, &il->status))
1073 return -EIO;
1074
1075 ret = il_send_cmd_sync(il, &cmd);
1076 if (ret)
1077 return ret;
1078
1079 pkt = (struct il_rx_pkt *)cmd.reply_page;
1080 if (pkt->u.status != CAN_ABORT_STATUS) {
1081 /* The scan abort will return 1 for success or
1082 * 2 for "failure". A failure condition can be
1083 * due to simply not being in an active scan which
1084 * can occur if we send the scan abort before we
1085 * the microcode has notified us that a scan is
1086 * completed. */
1087 D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
1088 ret = -EIO;
1089 }
1090
1091 il_free_pages(il, cmd.reply_page);
1092 return ret;
1093}
1094
1095static void
1096il_complete_scan(struct il_priv *il, bool aborted)
1097{
1098 /* check if scan was requested from mac80211 */
1099 if (il->scan_request) {
1100 D_SCAN("Complete scan in mac80211\n");
1101 ieee80211_scan_completed(il->hw, aborted);
1102 }
1103
1104 il->scan_vif = NULL;
1105 il->scan_request = NULL;
1106}
1107
/*
 * il_force_scan_end - unconditionally wind down a scan in the driver
 *
 * Clears all scan status bits and reports the scan to mac80211 as
 * aborted, without sending anything to the firmware.  Used when the
 * firmware is unresponsive or already gone.  Caller holds il->mutex.
 */
void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	/* true: tell mac80211 the scan was aborted, not completed */
	il_complete_scan(il, true);
}
1124
1125static void
1126il_do_scan_abort(struct il_priv *il)
1127{
1128 int ret;
1129
1130 lockdep_assert_held(&il->mutex);
1131
1132 if (!test_bit(S_SCANNING, &il->status)) {
1133 D_SCAN("Not performing scan to abort\n");
1134 return;
1135 }
1136
1137 if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
1138 D_SCAN("Scan abort in progress\n");
1139 return;
1140 }
1141
1142 ret = il_send_scan_abort(il);
1143 if (ret) {
1144 D_SCAN("Send scan abort failed %d\n", ret);
1145 il_force_scan_end(il);
1146 } else
1147 D_SCAN("Successfully send scan abort\n");
1148}
1149
1150/**
1151 * il_scan_cancel - Cancel any currently executing HW scan
1152 */
1153int
1154il_scan_cancel(struct il_priv *il)
1155{
1156 D_SCAN("Queuing abort scan\n");
1157 queue_work(il->workqueue, &il->abort_scan);
1158 return 0;
1159}
1160EXPORT_SYMBOL(il_scan_cancel);
1161
1162/**
1163 * il_scan_cancel_timeout - Cancel any currently executing HW scan
1164 * @ms: amount of time to wait (in milliseconds) for scan to abort
1165 *
1166 */
1167int
1168il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
1169{
1170 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
1171
1172 lockdep_assert_held(&il->mutex);
1173
1174 D_SCAN("Scan cancel timeout\n");
1175
1176 il_do_scan_abort(il);
1177
1178 while (time_before_eq(jiffies, timeout)) {
1179 if (!test_bit(S_SCAN_HW, &il->status))
1180 break;
1181 msleep(20);
1182 }
1183
1184 return test_bit(S_SCAN_HW, &il->status);
1185}
1186EXPORT_SYMBOL(il_scan_cancel_timeout);
1187
/* Service response to C_SCAN (0x80) */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	/* Debug builds only: dump the firmware's scan request status. */
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}
1200
1201/* Service N_SCAN_START (0x82) */
1202static void
1203il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
1204{
1205 struct il_rx_pkt *pkt = rxb_addr(rxb);
1206 struct il_scanstart_notification *notif =
1207 (struct il_scanstart_notification *)pkt->u.raw;
1208 il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
1209 D_SCAN("Scan start: " "%d [802.11%s] "
1210 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
1211 notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
1212 le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
1213}
1214
/* Service N_SCAN_RESULTS (0x83) */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	/* Debug builds only: dump per-channel scan result statistics. */
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	/* elapsed is computed against the TSF saved in il_hdl_scan_start() */
	D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}
1231
1232/* Service N_SCAN_COMPLETE (0x84) */
1233static void
1234il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
1235{
1236
1237#ifdef CONFIG_IWLEGACY_DEBUG
1238 struct il_rx_pkt *pkt = rxb_addr(rxb);
1239 struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
1240#endif
1241
1242 D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
1243 scan_notif->scanned_channels, scan_notif->tsf_low,
1244 scan_notif->tsf_high, scan_notif->status);
1245
1246 /* The HW is no longer scanning */
1247 clear_bit(S_SCAN_HW, &il->status);
1248
1249 D_SCAN("Scan on %sGHz took %dms\n",
1250 (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
1251 jiffies_to_msecs(jiffies - il->scan_start));
1252
1253 queue_work(il->workqueue, &il->scan_completed);
1254}
1255
/*
 * il_setup_rx_scan_handlers - install RX dispatch callbacks for every
 * scan-related firmware notification (C_SCAN response and the
 * start/results/complete notifications).
 */
void
il_setup_rx_scan_handlers(struct il_priv *il)
{
	/* scan handlers */
	il->handlers[C_SCAN] = il_hdl_scan;
	il->handlers[N_SCAN_START] = il_hdl_scan_start;
	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
}
EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1266
1267inline u16
1268il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1269 u8 n_probes)
1270{
1271 if (band == IEEE80211_BAND_5GHZ)
1272 return IL_ACTIVE_DWELL_TIME_52 +
1273 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
1274 else
1275 return IL_ACTIVE_DWELL_TIME_24 +
1276 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
1277}
1278EXPORT_SYMBOL(il_get_active_dwell_time);
1279
/**
 * il_get_passive_dwell_time - passive-scan dwell time for one channel
 * @band: band being scanned
 * @vif: interface the scan was requested on (unused here)
 *
 * Returns the per-band base passive dwell, clamped while associated so
 * the off-channel time never exceeds ~98% of the beacon interval minus
 * twice the channel tune time (to avoid missing our AP's beacons).
 */
u16
il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
			  struct ieee80211_vif *vif)
{
	struct il_rxon_context *ctx = &il->ctx;
	u16 value;

	u16 passive =
	    (band ==
	     IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_52;

	if (il_is_any_associated(il)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		/* beacon_int of 0 (no vif) or above the base falls back
		 * to IL_PASSIVE_DWELL_BASE */
		value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
		if (value > IL_PASSIVE_DWELL_BASE || !value)
			value = IL_PASSIVE_DWELL_BASE;
		/* NOTE(review): for beacon_int < ~11 this u16 expression
		 * would underflow; presumably such intervals never occur
		 * in practice -- TODO confirm */
		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
		passive = min(value, passive);
	}

	return passive;
}
EXPORT_SYMBOL(il_get_passive_dwell_time);
1309
1310void
1311il_init_scan_params(struct il_priv *il)
1312{
1313 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
1314 if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
1315 il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
1316 if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
1317 il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
1318}
1319EXPORT_SYMBOL(il_init_scan_params);
1320
1321static int
1322il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
1323{
1324 int ret;
1325
1326 lockdep_assert_held(&il->mutex);
1327
1328 if (WARN_ON(!il->cfg->ops->utils->request_scan))
1329 return -EOPNOTSUPP;
1330
1331 cancel_delayed_work(&il->scan_check);
1332
1333 if (!il_is_ready_rf(il)) {
1334 IL_WARN("Request scan called when driver not ready.\n");
1335 return -EIO;
1336 }
1337
1338 if (test_bit(S_SCAN_HW, &il->status)) {
1339 D_SCAN("Multiple concurrent scan requests in parallel.\n");
1340 return -EBUSY;
1341 }
1342
1343 if (test_bit(S_SCAN_ABORTING, &il->status)) {
1344 D_SCAN("Scan request while abort pending.\n");
1345 return -EBUSY;
1346 }
1347
1348 D_SCAN("Starting scan...\n");
1349
1350 set_bit(S_SCANNING, &il->status);
1351 il->scan_start = jiffies;
1352
1353 ret = il->cfg->ops->utils->request_scan(il, vif);
1354 if (ret) {
1355 clear_bit(S_SCANNING, &il->status);
1356 return ret;
1357 }
1358
1359 queue_delayed_work(il->workqueue, &il->scan_check,
1360 IL_SCAN_CHECK_WATCHDOG);
1361
1362 return 0;
1363}
1364
1365int
1366il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1367 struct cfg80211_scan_request *req)
1368{
1369 struct il_priv *il = hw->priv;
1370 int ret;
1371
1372 D_MAC80211("enter\n");
1373
1374 if (req->n_channels == 0)
1375 return -EINVAL;
1376
1377 mutex_lock(&il->mutex);
1378
1379 if (test_bit(S_SCANNING, &il->status)) {
1380 D_SCAN("Scan already in progress.\n");
1381 ret = -EAGAIN;
1382 goto out_unlock;
1383 }
1384
1385 /* mac80211 will only ask for one band at a time */
1386 il->scan_request = req;
1387 il->scan_vif = vif;
1388 il->scan_band = req->channels[0]->band;
1389
1390 ret = il_scan_initiate(il, vif);
1391
1392 D_MAC80211("leave\n");
1393
1394out_unlock:
1395 mutex_unlock(&il->mutex);
1396
1397 return ret;
1398}
1399EXPORT_SYMBOL(il_mac_hw_scan);
1400
1401static void
1402il_bg_scan_check(struct work_struct *data)
1403{
1404 struct il_priv *il =
1405 container_of(data, struct il_priv, scan_check.work);
1406
1407 D_SCAN("Scan check work\n");
1408
1409 /* Since we are here firmware does not finish scan and
1410 * most likely is in bad shape, so we don't bother to
1411 * send abort command, just force scan complete to mac80211 */
1412 mutex_lock(&il->mutex);
1413 il_force_scan_end(il);
1414 mutex_unlock(&il->mutex);
1415}
1416
1417/**
1418 * il_fill_probe_req - fill in all required fields and IE for probe request
1419 */
1420
1421u16
1422il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
1423 const u8 *ta, const u8 *ies, int ie_len, int left)
1424{
1425 int len = 0;
1426 u8 *pos = NULL;
1427
1428 /* Make sure there is enough space for the probe request,
1429 * two mandatory IEs and the data */
1430 left -= 24;
1431 if (left < 0)
1432 return 0;
1433
1434 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1435 memcpy(frame->da, il_bcast_addr, ETH_ALEN);
1436 memcpy(frame->sa, ta, ETH_ALEN);
1437 memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
1438 frame->seq_ctrl = 0;
1439
1440 len += 24;
1441
1442 /* ...next IE... */
1443 pos = &frame->u.probe_req.variable[0];
1444
1445 /* fill in our indirect SSID IE */
1446 left -= 2;
1447 if (left < 0)
1448 return 0;
1449 *pos++ = WLAN_EID_SSID;
1450 *pos++ = 0;
1451
1452 len += 2;
1453
1454 if (WARN_ON(left < ie_len))
1455 return len;
1456
1457 if (ies && ie_len) {
1458 memcpy(pos, ies, ie_len);
1459 len += ie_len;
1460 }
1461
1462 return (u16) len;
1463}
1464EXPORT_SYMBOL(il_fill_probe_req);
1465
1466static void
1467il_bg_abort_scan(struct work_struct *work)
1468{
1469 struct il_priv *il = container_of(work, struct il_priv, abort_scan);
1470
1471 D_SCAN("Abort scan work\n");
1472
1473 /* We keep scan_check work queued in case when firmware will not
1474 * report back scan completed notification */
1475 mutex_lock(&il->mutex);
1476 il_scan_cancel_timeout(il, 200);
1477 mutex_unlock(&il->mutex);
1478}
1479
/*
 * il_bg_scan_completed - work handler run when the firmware reports
 * (or we force) scan completion.  Reports the result to mac80211 and
 * re-applies power/TX-power settings that were held back during the
 * scan.
 */
static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	/* Completion arrived, the watchdog is no longer needed. */
	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	/* Someone else (e.g. il_force_scan_end()) may already have
	 * completed the scan; then only redo the post-scan settings. */
	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->cfg->ops->utils->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}
1520
1521void
1522il_setup_scan_deferred_work(struct il_priv *il)
1523{
1524 INIT_WORK(&il->scan_completed, il_bg_scan_completed);
1525 INIT_WORK(&il->abort_scan, il_bg_abort_scan);
1526 INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
1527}
1528EXPORT_SYMBOL(il_setup_scan_deferred_work);
1529
1530void
1531il_cancel_scan_deferred_work(struct il_priv *il)
1532{
1533 cancel_work_sync(&il->abort_scan);
1534 cancel_work_sync(&il->scan_completed);
1535
1536 if (cancel_delayed_work_sync(&il->scan_check)) {
1537 mutex_lock(&il->mutex);
1538 il_force_scan_end(il);
1539 mutex_unlock(&il->mutex);
1540 }
1541}
1542EXPORT_SYMBOL(il_cancel_scan_deferred_work);
1543
1544/* il->sta_lock must be held */
1545static void
1546il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
1547{
1548
1549 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
1550 IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
1551 sta_id, il->stations[sta_id].sta.sta.addr);
1552
1553 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
1554 D_ASSOC("STA id %u addr %pM already present"
1555 " in uCode (according to driver)\n", sta_id,
1556 il->stations[sta_id].sta.sta.addr);
1557 } else {
1558 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
1559 D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
1560 il->stations[sta_id].sta.sta.addr);
1561 }
1562}
1563
/*
 * il_process_add_sta_resp - handle the firmware's reply to C_ADD_STA
 * @addsta: the command that was sent (identifies the station)
 * @pkt: firmware response packet
 * @sync: true for synchronous sends (unused in the logic below)
 *
 * On ADD_STA_SUCCESS_MSK marks the station uCode-active under sta_lock.
 * Returns 0 on success, -EIO on any firmware-reported failure.
 */
static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
		break;
	}

	/* mode distinguishes add (0) from modify (STA_CONTROL_MODIFY_MSK) */
	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device. That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns. This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
1623
1624static void
1625il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
1626 struct il_rx_pkt *pkt)
1627{
1628 struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
1629
1630 il_process_add_sta_resp(il, addsta, pkt, false);
1631
1632}
1633
1634int
1635il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
1636{
1637 struct il_rx_pkt *pkt = NULL;
1638 int ret = 0;
1639 u8 data[sizeof(*sta)];
1640 struct il_host_cmd cmd = {
1641 .id = C_ADD_STA,
1642 .flags = flags,
1643 .data = data,
1644 };
1645 u8 sta_id __maybe_unused = sta->sta.sta_id;
1646
1647 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
1648 flags & CMD_ASYNC ? "a" : "");
1649
1650 if (flags & CMD_ASYNC)
1651 cmd.callback = il_add_sta_callback;
1652 else {
1653 cmd.flags |= CMD_WANT_SKB;
1654 might_sleep();
1655 }
1656
1657 cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
1658 ret = il_send_cmd(il, &cmd);
1659
1660 if (ret || (flags & CMD_ASYNC))
1661 return ret;
1662
1663 if (ret == 0) {
1664 pkt = (struct il_rx_pkt *)cmd.reply_page;
1665 ret = il_process_add_sta_resp(il, sta, pkt, true);
1666 }
1667 il_free_pages(il, cmd.reply_page);
1668
1669 return ret;
1670}
1671EXPORT_SYMBOL(il_send_add_sta);
1672
/*
 * il_set_ht_add_station - fold the peer's HT capabilities into the
 * pending C_ADD_STA command for station @idx
 *
 * Updates il->stations[idx].sta.station_flags from SM power-save mode,
 * A-MPDU factor/density and HT40 allowance.  No-op for non-HT peers.
 */
static void
il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
		      struct il_rxon_context *ctx)
{
	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
	__le32 sta_flags;
	u8 mimo_ps_mode;

	if (!sta || !sta_ht_inf->ht_supported)
		goto done;

	/* SM PS occupies bits 2..3 of the HT capability info field */
	mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
	D_ASSOC("spatial multiplexing power save mode: %s\n",
		(mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
		(mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
		"disabled");

	sta_flags = il->stations[idx].sta.station_flags;

	sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);

	switch (mimo_ps_mode) {
	case WLAN_HT_CAP_SM_PS_STATIC:
		/* peer can only receive one spatial stream */
		sta_flags |= STA_FLG_MIMO_DIS_MSK;
		break;
	case WLAN_HT_CAP_SM_PS_DYNAMIC:
		/* peer needs RTS before MIMO transmissions */
		sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
		break;
	case WLAN_HT_CAP_SM_PS_DISABLED:
		break;
	default:
		IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
		break;
	}

	sta_flags |=
	    cpu_to_le32((u32) sta_ht_inf->
			ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);

	sta_flags |=
	    cpu_to_le32((u32) sta_ht_inf->
			ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);

	if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
		sta_flags |= STA_FLG_HT40_EN_MSK;
	else
		sta_flags &= ~STA_FLG_HT40_EN_MSK;

	il->stations[idx].sta.station_flags = sta_flags;
done:
	return;
}
1725
1726/**
1727 * il_prep_station - Prepare station information for addition
1728 *
1729 * should be called with sta_lock held
1730 */
1731u8
1732il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
1733 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
1734{
1735 struct il_station_entry *station;
1736 int i;
1737 u8 sta_id = IL_INVALID_STATION;
1738 u16 rate;
1739
1740 if (is_ap)
1741 sta_id = ctx->ap_sta_id;
1742 else if (is_broadcast_ether_addr(addr))
1743 sta_id = ctx->bcast_sta_id;
1744 else
1745 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
1746 if (!compare_ether_addr
1747 (il->stations[i].sta.sta.addr, addr)) {
1748 sta_id = i;
1749 break;
1750 }
1751
1752 if (!il->stations[i].used &&
1753 sta_id == IL_INVALID_STATION)
1754 sta_id = i;
1755 }
1756
1757 /*
1758 * These two conditions have the same outcome, but keep them
1759 * separate
1760 */
1761 if (unlikely(sta_id == IL_INVALID_STATION))
1762 return sta_id;
1763
1764 /*
1765 * uCode is not able to deal with multiple requests to add a
1766 * station. Keep track if one is in progress so that we do not send
1767 * another.
1768 */
1769 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
1770 D_INFO("STA %d already in process of being added.\n", sta_id);
1771 return sta_id;
1772 }
1773
1774 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
1775 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
1776 !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
1777 D_ASSOC("STA %d (%pM) already added, not adding again.\n",
1778 sta_id, addr);
1779 return sta_id;
1780 }
1781
1782 station = &il->stations[sta_id];
1783 station->used = IL_STA_DRIVER_ACTIVE;
1784 D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
1785 il->num_stations++;
1786
1787 /* Set up the C_ADD_STA command to send to device */
1788 memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
1789 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
1790 station->sta.mode = 0;
1791 station->sta.sta.sta_id = sta_id;
1792 station->sta.station_flags = ctx->station_flags;
1793 station->ctxid = ctx->ctxid;
1794
1795 if (sta) {
1796 struct il_station_priv_common *sta_priv;
1797
1798 sta_priv = (void *)sta->drv_priv;
1799 sta_priv->ctx = ctx;
1800 }
1801
1802 /*
1803 * OK to call unconditionally, since local stations (IBSS BSSID
1804 * STA and broadcast STA) pass in a NULL sta, and mac80211
1805 * doesn't allow HT IBSS.
1806 */
1807 il_set_ht_add_station(il, sta_id, sta, ctx);
1808
1809 /* 3945 only */
1810 rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
1811 /* Turn on both antennas for the station... */
1812 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
1813
1814 return sta_id;
1815
1816}
1817EXPORT_SYMBOL_GPL(il_prep_station);
1818
1819#define STA_WAIT_TIMEOUT (HZ/2)
1820
1821/**
1822 * il_add_station_common -
1823 */
1824int
1825il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
1826 const u8 *addr, bool is_ap, struct ieee80211_sta *sta,
1827 u8 *sta_id_r)
1828{
1829 unsigned long flags_spin;
1830 int ret = 0;
1831 u8 sta_id;
1832 struct il_addsta_cmd sta_cmd;
1833
1834 *sta_id_r = 0;
1835 spin_lock_irqsave(&il->sta_lock, flags_spin);
1836 sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
1837 if (sta_id == IL_INVALID_STATION) {
1838 IL_ERR("Unable to prepare station %pM for addition\n", addr);
1839 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
1840 return -EINVAL;
1841 }
1842
1843 /*
1844 * uCode is not able to deal with multiple requests to add a
1845 * station. Keep track if one is in progress so that we do not send
1846 * another.
1847 */
1848 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
1849 D_INFO("STA %d already in process of being added.\n", sta_id);
1850 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
1851 return -EEXIST;
1852 }
1853
1854 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
1855 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
1856 D_ASSOC("STA %d (%pM) already added, not adding again.\n",
1857 sta_id, addr);
1858 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
1859 return -EEXIST;
1860 }
1861
1862 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
1863 memcpy(&sta_cmd, &il->stations[sta_id].sta,
1864 sizeof(struct il_addsta_cmd));
1865 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
1866
1867 /* Add station to device's station table */
1868 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
1869 if (ret) {
1870 spin_lock_irqsave(&il->sta_lock, flags_spin);
1871 IL_ERR("Adding station %pM failed.\n",
1872 il->stations[sta_id].sta.sta.addr);
1873 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
1874 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
1875 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
1876 }
1877 *sta_id_r = sta_id;
1878 return ret;
1879}
1880EXPORT_SYMBOL(il_add_station_common);
1881
1882/**
1883 * il_sta_ucode_deactivate - deactivate ucode status for a station
1884 *
1885 * il->sta_lock must be held
1886 */
1887static void
1888il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
1889{
1890 /* Ucode must be active and driver must be non active */
1891 if ((il->stations[sta_id].
1892 used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
1893 IL_STA_UCODE_ACTIVE)
1894 IL_ERR("removed non active STA %u\n", sta_id);
1895
1896 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
1897
1898 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
1899 D_ASSOC("Removed STA %u\n", sta_id);
1900}
1901
/*
 * il_send_remove_station - send C_REM_STA for one station, synchronously
 * @addr: MAC address of the station to remove
 * @sta_id: driver station table index
 * @temporary: when true, keep the driver's table entry (do not deactivate)
 *
 * Returns 0 on success, -EIO when the firmware rejects the removal,
 * or the error from il_send_cmd().
 */
static int
il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
		       bool temporary)
{
	struct il_rx_pkt *pkt;
	int ret;

	unsigned long flags_spin;
	struct il_rem_sta_cmd rm_sta_cmd;

	struct il_host_cmd cmd = {
		.id = C_REM_STA,
		.len = sizeof(struct il_rem_sta_cmd),
		.flags = CMD_SYNC,
		.data = &rm_sta_cmd,
	};

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;
	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);

	/* we need the response packet to inspect the removal status */
	cmd.flags |= CMD_WANT_SKB;

	ret = il_send_cmd(il, &cmd);

	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
		ret = -EIO;
	}

	if (!ret) {
		switch (pkt->u.rem_sta.status) {
		case REM_STA_SUCCESS_MSK:
			if (!temporary) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				il_sta_ucode_deactivate(il, sta_id);
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			D_ASSOC("C_REM_STA PASSED\n");
			break;
		default:
			ret = -EIO;
			IL_ERR("C_REM_STA failed\n");
			break;
		}
	}
	il_free_pages(il, cmd.reply_page);

	return ret;
}
1957
1958/**
1959 * il_remove_station - Remove driver's knowledge of station.
1960 */
1961int
1962il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
1963{
1964 unsigned long flags;
1965
1966 if (!il_is_ready(il)) {
1967 D_INFO("Unable to remove station %pM, device not ready.\n",
1968 addr);
1969 /*
1970 * It is typical for stations to be removed when we are
1971 * going down. Return success since device will be down
1972 * soon anyway
1973 */
1974 return 0;
1975 }
1976
1977 D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);
1978
1979 if (WARN_ON(sta_id == IL_INVALID_STATION))
1980 return -EINVAL;
1981
1982 spin_lock_irqsave(&il->sta_lock, flags);
1983
1984 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
1985 D_INFO("Removing %pM but non DRIVER active\n", addr);
1986 goto out_err;
1987 }
1988
1989 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
1990 D_INFO("Removing %pM but non UCODE active\n", addr);
1991 goto out_err;
1992 }
1993
1994 if (il->stations[sta_id].used & IL_STA_LOCAL) {
1995 kfree(il->stations[sta_id].lq);
1996 il->stations[sta_id].lq = NULL;
1997 }
1998
1999 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
2000
2001 il->num_stations--;
2002
2003 BUG_ON(il->num_stations < 0);
2004
2005 spin_unlock_irqrestore(&il->sta_lock, flags);
2006
2007 return il_send_remove_station(il, addr, sta_id, false);
2008out_err:
2009 spin_unlock_irqrestore(&il->sta_lock, flags);
2010 return -EINVAL;
2011}
2012EXPORT_SYMBOL_GPL(il_remove_station);
2013
2014/**
2015 * il_clear_ucode_stations - clear ucode station table bits
2016 *
2017 * This function clears all the bits in the driver indicating
2018 * which stations are active in the ucode. Call when something
2019 * other than explicit station management would cause this in
2020 * the ucode, e.g. unassociated RXON.
2021 */
2022void
2023il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
2024{
2025 int i;
2026 unsigned long flags_spin;
2027 bool cleared = false;
2028
2029 D_INFO("Clearing ucode stations in driver\n");
2030
2031 spin_lock_irqsave(&il->sta_lock, flags_spin);
2032 for (i = 0; i < il->hw_params.max_stations; i++) {
2033 if (ctx && ctx->ctxid != il->stations[i].ctxid)
2034 continue;
2035
2036 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2037 D_INFO("Clearing ucode active for station %d\n", i);
2038 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2039 cleared = true;
2040 }
2041 }
2042 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2043
2044 if (!cleared)
2045 D_INFO("No active stations found to be cleared\n");
2046}
2047EXPORT_SYMBOL(il_clear_ucode_stations);
2048
2049/**
2050 * il_restore_stations() - Restore driver known stations to device
2051 *
2052 * All stations considered active by driver, but not present in ucode, is
2053 * restored.
2054 *
2055 * Function sleeps.
2056 */
2057void
2058il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
2059{
2060 struct il_addsta_cmd sta_cmd;
2061 struct il_link_quality_cmd lq;
2062 unsigned long flags_spin;
2063 int i;
2064 bool found = false;
2065 int ret;
2066 bool send_lq;
2067
2068 if (!il_is_ready(il)) {
2069 D_INFO("Not ready yet, not restoring any stations.\n");
2070 return;
2071 }
2072
2073 D_ASSOC("Restoring all known stations ... start.\n");
2074 spin_lock_irqsave(&il->sta_lock, flags_spin);
2075 for (i = 0; i < il->hw_params.max_stations; i++) {
2076 if (ctx->ctxid != il->stations[i].ctxid)
2077 continue;
2078 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
2079 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
2080 D_ASSOC("Restoring sta %pM\n",
2081 il->stations[i].sta.sta.addr);
2082 il->stations[i].sta.mode = 0;
2083 il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
2084 found = true;
2085 }
2086 }
2087
2088 for (i = 0; i < il->hw_params.max_stations; i++) {
2089 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
2090 memcpy(&sta_cmd, &il->stations[i].sta,
2091 sizeof(struct il_addsta_cmd));
2092 send_lq = false;
2093 if (il->stations[i].lq) {
2094 memcpy(&lq, il->stations[i].lq,
2095 sizeof(struct il_link_quality_cmd));
2096 send_lq = true;
2097 }
2098 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2099 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2100 if (ret) {
2101 spin_lock_irqsave(&il->sta_lock, flags_spin);
2102 IL_ERR("Adding station %pM failed.\n",
2103 il->stations[i].sta.sta.addr);
2104 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
2105 il->stations[i].used &=
2106 ~IL_STA_UCODE_INPROGRESS;
2107 spin_unlock_irqrestore(&il->sta_lock,
2108 flags_spin);
2109 }
2110 /*
2111 * Rate scaling has already been initialized, send
2112 * current LQ command
2113 */
2114 if (send_lq)
2115 il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true);
2116 spin_lock_irqsave(&il->sta_lock, flags_spin);
2117 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
2118 }
2119 }
2120
2121 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2122 if (!found)
2123 D_INFO("Restoring all known stations"
2124 " .... no stations to be restored.\n");
2125 else
2126 D_INFO("Restoring all known stations" " .... complete.\n");
2127}
2128EXPORT_SYMBOL(il_restore_stations);
2129
2130int
2131il_get_free_ucode_key_idx(struct il_priv *il)
2132{
2133 int i;
2134
2135 for (i = 0; i < il->sta_key_max_num; i++)
2136 if (!test_and_set_bit(i, &il->ucode_key_table))
2137 return i;
2138
2139 return WEP_INVALID_OFFSET;
2140}
2141EXPORT_SYMBOL(il_get_free_ucode_key_idx);
2142
2143void
2144il_dealloc_bcast_stations(struct il_priv *il)
2145{
2146 unsigned long flags;
2147 int i;
2148
2149 spin_lock_irqsave(&il->sta_lock, flags);
2150 for (i = 0; i < il->hw_params.max_stations; i++) {
2151 if (!(il->stations[i].used & IL_STA_BCAST))
2152 continue;
2153
2154 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2155 il->num_stations--;
2156 BUG_ON(il->num_stations < 0);
2157 kfree(il->stations[i].lq);
2158 il->stations[i].lq = NULL;
2159 }
2160 spin_unlock_irqrestore(&il->sta_lock, flags);
2161}
2162EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2163
#ifdef CONFIG_IWLEGACY_DEBUG
/* Debug helper: dump a link-quality command's antenna masks and the
 * full retry rate table. */
static void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
	int i;
	D_RATE("lq station id 0x%x\n", lq->sta_id);
	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
	       lq->general_params.dual_stream_ant_msk);

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
}
#else
/* Non-debug builds: dumping compiles away entirely. */
static inline void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
}
#endif
2182
2183/**
2184 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
2185 *
2186 * It sometimes happens when a HT rate has been in use and we
2187 * loose connectivity with AP then mac80211 will first tell us that the
2188 * current channel is not HT anymore before removing the station. In such a
2189 * scenario the RXON flags will be updated to indicate we are not
2190 * communicating HT anymore, but the LQ command may still contain HT rates.
2191 * Test for this to prevent driver from sending LQ command between the time
2192 * RXON flags are updated and when LQ command is updated.
2193 */
2194static bool
2195il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
2196 struct il_link_quality_cmd *lq)
2197{
2198 int i;
2199
2200 if (ctx->ht.enabled)
2201 return true;
2202
2203 D_INFO("Channel %u is not an HT channel\n", ctx->active.channel);
2204 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2205 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
2206 D_INFO("idx %d of LQ expects HT channel\n", i);
2207 return false;
2208 }
2209 }
2210 return true;
2211}
2212
2213/**
2214 * il_send_lq_cmd() - Send link quality command
2215 * @init: This command is sent as part of station initialization right
2216 * after station has been added.
2217 *
2218 * The link quality command is sent as the last step of station creation.
2219 * This is the special case in which init is set and we call a callback in
2220 * this case to clear the state indicating that station creation is in
2221 * progress.
2222 */
2223int
2224il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
2225 struct il_link_quality_cmd *lq, u8 flags, bool init)
2226{
2227 int ret = 0;
2228 unsigned long flags_spin;
2229
2230 struct il_host_cmd cmd = {
2231 .id = C_TX_LINK_QUALITY_CMD,
2232 .len = sizeof(struct il_link_quality_cmd),
2233 .flags = flags,
2234 .data = lq,
2235 };
2236
2237 if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
2238 return -EINVAL;
2239
2240 spin_lock_irqsave(&il->sta_lock, flags_spin);
2241 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2242 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2243 return -EINVAL;
2244 }
2245 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2246
2247 il_dump_lq_cmd(il, lq);
2248 BUG_ON(init && (cmd.flags & CMD_ASYNC));
2249
2250 if (il_is_lq_table_valid(il, ctx, lq))
2251 ret = il_send_cmd(il, &cmd);
2252 else
2253 ret = -EINVAL;
2254
2255 if (cmd.flags & CMD_ASYNC)
2256 return ret;
2257
2258 if (init) {
2259 D_INFO("init LQ command complete,"
2260 " clearing sta addition status for sta %d\n",
2261 lq->sta_id);
2262 spin_lock_irqsave(&il->sta_lock, flags_spin);
2263 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2264 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2265 }
2266 return ret;
2267}
2268EXPORT_SYMBOL(il_send_lq_cmd);
2269
/*
 * il_mac_sta_remove - mac80211 station-removal callback
 *
 * Removes the driver's bookkeeping for @sta (looked up via the sta_id
 * stored in the station's drv_priv) under il->mutex.  Returns the result
 * of il_remove_station() (0 on success).
 */
int
il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
	int ret;

	D_INFO("received request to remove station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to remove station %pM\n", sta->addr);
	ret = il_remove_station(il, sta_common->sta_id, sta->addr);
	if (ret)
		IL_ERR("Error removing station %pM\n", sta->addr);
	mutex_unlock(&il->mutex);
	return ret;
}
EXPORT_SYMBOL(il_mac_sta_remove);
2288
2289/************************** RX-FUNCTIONS ****************************/
2290/*
2291 * Rx theory of operation
2292 *
2293 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
2294 * each of which point to Receive Buffers to be filled by the NIC. These get
2295 * used not only for Rx frames, but for any command response or notification
2296 * from the NIC. The driver and NIC manage the Rx buffers by means
2297 * of idxes into the circular buffer.
2298 *
2299 * Rx Queue Indexes
2300 * The host/firmware share two idx registers for managing the Rx buffers.
2301 *
2302 * The READ idx maps to the first position that the firmware may be writing
2303 * to -- the driver can read up to (but not including) this position and get
2304 * good data.
2305 * The READ idx is managed by the firmware once the card is enabled.
2306 *
2307 * The WRITE idx maps to the last position the driver has read from -- the
2308 * position preceding WRITE is the last slot the firmware can place a packet.
2309 *
2310 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
2311 * WRITE = READ.
2312 *
2313 * During initialization, the host sets up the READ queue position to the first
2314 * IDX position, and WRITE to the last (READ - 1 wrapped)
2315 *
2316 * When the firmware places a packet in a buffer, it will advance the READ idx
2317 * and fire the RX interrupt. The driver can then query the READ idx and
2318 * process as many packets as possible, moving the WRITE idx forward as it
2319 * resets the Rx queue buffers with new memory.
2320 *
2321 * The management in the driver is as follows:
2322 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
2323 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
2324 * to replenish the iwl->rxq->rx_free.
2325 * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
2326 * iwl->rxq is replenished and the READ IDX is updated (updating the
2327 * 'processed' and 'read' driver idxes as well)
2328 * + A received packet is processed and handed to the kernel network stack,
2329 * detached from the iwl->rxq. The driver 'processed' idx is updated.
2330 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
2331 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
2332 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
2333 * were enough free buffers and RX_STALLED is set it is cleared.
2334 *
2335 *
2336 * Driver sequence:
2337 *
2338 * il_rx_queue_alloc() Allocates rx_free
2339 * il_rx_replenish() Replenishes rx_free list from rx_used, and calls
2340 * il_rx_queue_restock
2341 * il_rx_queue_restock() Moves available buffers from rx_free into Rx
2342 * queue, updates firmware pointers, and updates
2343 * the WRITE idx. If insufficient rx_free buffers
2344 * are available, schedules il_rx_replenish
2345 *
2346 * -- enable interrupts --
2347 * ISR - il_rx() Detach il_rx_bufs from pool up to the
2348 * READ IDX, detaching the SKB from the pool.
2349 * Moves the packet buffer from queue to rx_used.
2350 * Calls il_rx_queue_restock to refill any empty
2351 * slots.
2352 * ...
2353 *
2354 */
2355
2356/**
2357 * il_rx_queue_space - Return number of free slots available in queue.
2358 */
2359int
2360il_rx_queue_space(const struct il_rx_queue *q)
2361{
2362 int s = q->read - q->write;
2363 if (s <= 0)
2364 s += RX_QUEUE_SIZE;
2365 /* keep some buffer to not confuse full and empty queue */
2366 s -= 2;
2367 if (s < 0)
2368 s = 0;
2369 return s;
2370}
2371EXPORT_SYMBOL(il_rx_queue_space);
2372
/**
 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Pushes the driver's RX write pointer to the device register, but only
 * when q->need_update is set.  If the device may be asleep (power-save),
 * it is first asked to wake up and the write is retried on a later call.
 */
void
il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	/* Nothing new to tell the device. */
	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(S_POWER_PMI, &il->status)) {
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
			       reg);
			/* Request wakeup; need_update stays set so the
			 * write is performed on a later invocation. */
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			goto exit_unlock;
		}

		/* Device expects a multiple of 8. */
		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);

		/* Else device is assumed to be awake */
	} else {
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
	}

	q->need_update = 0;

exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
2416
2417int
2418il_rx_queue_alloc(struct il_priv *il)
2419{
2420 struct il_rx_queue *rxq = &il->rxq;
2421 struct device *dev = &il->pci_dev->dev;
2422 int i;
2423
2424 spin_lock_init(&rxq->lock);
2425 INIT_LIST_HEAD(&rxq->rx_free);
2426 INIT_LIST_HEAD(&rxq->rx_used);
2427
2428 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
2429 rxq->bd =
2430 dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2431 GFP_KERNEL);
2432 if (!rxq->bd)
2433 goto err_bd;
2434
2435 rxq->rb_stts =
2436 dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2437 &rxq->rb_stts_dma, GFP_KERNEL);
2438 if (!rxq->rb_stts)
2439 goto err_rb;
2440
2441 /* Fill the rx_used queue with _all_ of the Rx buffers */
2442 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
2443 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2444
2445 /* Set us so that we have processed and used all buffers, but have
2446 * not restocked the Rx queue with fresh buffers */
2447 rxq->read = rxq->write = 0;
2448 rxq->write_actual = 0;
2449 rxq->free_count = 0;
2450 rxq->need_update = 0;
2451 return 0;
2452
2453err_rb:
2454 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2455 rxq->bd_dma);
2456err_bd:
2457 return -ENOMEM;
2458}
2459EXPORT_SYMBOL(il_rx_queue_alloc);
2460
2461void
2462il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
2463{
2464 struct il_rx_pkt *pkt = rxb_addr(rxb);
2465 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
2466
2467 if (!report->state) {
2468 D_11H("Spectrum Measure Notification: Start\n");
2469 return;
2470 }
2471
2472 memcpy(&il->measure_report, report, sizeof(*report));
2473 il->measurement_status |= MEASUREMENT_READY;
2474}
2475EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2476
/*
 * returns non-zero if packet should be dropped
 */
int
il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
		      u32 decrypt_res, struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* Unprotected frames carry no decryption status to propagate. */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	D_RX("decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through - TKIP shares the ICV/MIC check with WEP */

	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			D_RX("Packet destroyed\n");
			return -1;
		}
		/* fall through - on success, mark the frame decrypted */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			D_RX("hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(il_set_decrypted_flag);
2527
/**
 * il_txq_update_write_ptr - Send new write idx to hardware
 *
 * No-op unless txq->need_update is set.  If the device may be asleep
 * (power-save), it is asked to wake up and the write is deferred to a
 * later call; otherwise the write pointer is pushed immediately.
 */
void
il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = _il_rd(il, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
			       txq_id, reg);
			/* need_update stays set; retried after wakeup. */
			il_set_bit(il, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return;
		}

		/* NOTE(review): this path uses il_wr() while the branch
		 * below uses _il_wr() -- presumably il_wr() performs the
		 * extra device-access handshake needed while power-saving;
		 * confirm against the register accessor definitions. */
		il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
	} else
		_il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
	txq->need_update = 0;
}
EXPORT_SYMBOL(il_txq_update_write_ptr);
2567
2568/**
2569 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
2570 */
2571void
2572il_tx_queue_unmap(struct il_priv *il, int txq_id)
2573{
2574 struct il_tx_queue *txq = &il->txq[txq_id];
2575 struct il_queue *q = &txq->q;
2576
2577 if (q->n_bd == 0)
2578 return;
2579
2580 while (q->write_ptr != q->read_ptr) {
2581 il->cfg->ops->lib->txq_free_tfd(il, txq);
2582 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2583 }
2584}
2585EXPORT_SYMBOL(il_tx_queue_unmap);
2586
2587/**
2588 * il_tx_queue_free - Deallocate DMA queue.
2589 * @txq: Transmit queue to deallocate.
2590 *
2591 * Empty queue by removing and destroying all BD's.
2592 * Free all buffers.
2593 * 0-fill, but do not free "txq" descriptor structure.
2594 */
2595void
2596il_tx_queue_free(struct il_priv *il, int txq_id)
2597{
2598 struct il_tx_queue *txq = &il->txq[txq_id];
2599 struct device *dev = &il->pci_dev->dev;
2600 int i;
2601
2602 il_tx_queue_unmap(il, txq_id);
2603
2604 /* De-alloc array of command/tx buffers */
2605 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
2606 kfree(txq->cmd[i]);
2607
2608 /* De-alloc circular buffer of TFDs */
2609 if (txq->q.n_bd)
2610 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2611 txq->tfds, txq->q.dma_addr);
2612
2613 /* De-alloc array of per-TFD driver data */
2614 kfree(txq->txb);
2615 txq->txb = NULL;
2616
2617 /* deallocate arrays */
2618 kfree(txq->cmd);
2619 kfree(txq->meta);
2620 txq->cmd = NULL;
2621 txq->meta = NULL;
2622
2623 /* 0-fill queue descriptor structure */
2624 memset(txq, 0, sizeof(*txq));
2625}
2626EXPORT_SYMBOL(il_tx_queue_free);
2627
/**
 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
 */
void
il_cmd_queue_unmap(struct il_priv *il)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct il_queue *q = &txq->q;
	int i;

	/* Nothing to do if the queue was never allocated. */
	if (q->n_bd == 0)
		return;

	/* Unmap every command still in flight between read and write. */
	while (q->read_ptr != q->write_ptr) {
		i = il_get_cmd_idx(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_MAPPED) {
			pci_unmap_single(il->pci_dev,
					 dma_unmap_addr(&txq->meta[i], mapping),
					 dma_unmap_len(&txq->meta[i], len),
					 PCI_DMA_BIDIRECTIONAL);
			/* Mark slot free again. */
			txq->meta[i].flags = 0;
		}

		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
	}

	/* The extra "huge" command slot sits at idx n_win, outside the
	 * normal window walked above (see il_tx_queue_init, which
	 * allocates one slot past slots_num for the command queue). */
	i = q->n_win;
	if (txq->meta[i].flags & CMD_MAPPED) {
		pci_unmap_single(il->pci_dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
		txq->meta[i].flags = 0;
	}
}
EXPORT_SYMBOL(il_cmd_queue_unmap);
2665
/**
 * il_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void
il_cmd_queue_free(struct il_priv *il)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct device *dev = &il->pci_dev->dev;
	int i;

	/* Drop any DMA mappings still held by in-flight commands. */
	il_cmd_queue_unmap(il);

	/* De-alloc array of command/tx buffers.  Note "<=": the command
	 * queue has one extra (huge) slot beyond TFD_CMD_SLOTS, allocated
	 * by il_tx_queue_init() for oversized commands such as scan. */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(il_cmd_queue_free);
2702
2703/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
2704 * DMA services
2705 *
2706 * Theory of operation
2707 *
2708 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
2709 * of buffer descriptors, each of which points to one or more data buffers for
2710 * the device to read from or fill. Driver and device exchange status of each
2711 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
2712 * entries in each circular buffer, to protect against confusing empty and full
2713 * queue states.
2714 *
2715 * The device reads or writes the data in the queues via the device's several
2716 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
2717 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
2722 *
2723 * See more detailed info in 4965.h.
2724 ***************************************************/
2725
2726int
2727il_queue_space(const struct il_queue *q)
2728{
2729 int s = q->read_ptr - q->write_ptr;
2730
2731 if (q->read_ptr > q->write_ptr)
2732 s -= q->n_bd;
2733
2734 if (s <= 0)
2735 s += q->n_win;
2736 /* keep some reserve to not confuse empty and full situations */
2737 s -= 2;
2738 if (s < 0)
2739 s = 0;
2740 return s;
2741}
2742EXPORT_SYMBOL(il_queue_space);
2743
2744
/**
 * il_queue_init - Initialize queue's high/low-water and read/write idxes
 *
 * @count: total number of buffer descriptors in the ring (power of two)
 * @slots_num: size of the command/Tx window (power of two)
 *
 * Always returns 0; invalid sizes trigger BUG_ON.
 */
static int
il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num,
	      u32 id)
{
	q->n_bd = count;
	q->n_win = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise il_queue_inc_wrap
	 * and il_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * il_get_cmd_idx is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	/* NOTE(review): low_mark (n_win/4, min 4) ends up larger than
	 * high_mark (n_win/8, min 2), which reads backwards for
	 * stop/resume watermarks -- confirm against the users of these
	 * fields before changing anything here. */
	q->low_mark = q->n_win / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_win / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	/* Queue starts empty. */
	q->write_ptr = q->read_ptr = 0;

	return 0;
}
2776
2777/**
2778 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
2779 */
2780static int
2781il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2782{
2783 struct device *dev = &il->pci_dev->dev;
2784 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2785
2786 /* Driver ilate data, only for Tx (not command) queues,
2787 * not shared with device. */
2788 if (id != il->cmd_queue) {
2789 txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]),
2790 GFP_KERNEL);
2791 if (!txq->txb) {
2792 IL_ERR("kmalloc for auxiliary BD "
2793 "structures failed\n");
2794 goto error;
2795 }
2796 } else {
2797 txq->txb = NULL;
2798 }
2799
2800 /* Circular buffer of transmit frame descriptors (TFDs),
2801 * shared with device */
2802 txq->tfds =
2803 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2804 if (!txq->tfds) {
2805 IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz);
2806 goto error;
2807 }
2808 txq->q.id = id;
2809
2810 return 0;
2811
2812error:
2813 kfree(txq->txb);
2814 txq->txb = NULL;
2815
2816 return -ENOMEM;
2817}
2818
2819/**
2820 * il_tx_queue_init - Allocate and initialize one tx/cmd queue
2821 */
2822int
2823il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
2824 u32 txq_id)
2825{
2826 int i, len;
2827 int ret;
2828 int actual_slots = slots_num;
2829
2830 /*
2831 * Alloc buffer array for commands (Tx or other types of commands).
2832 * For the command queue (#4/#9), allocate command space + one big
2833 * command for scan, since scan command is very huge; the system will
2834 * not have two scans at the same time, so only one is needed.
2835 * For normal Tx queues (all other queues), no super-size command
2836 * space is needed.
2837 */
2838 if (txq_id == il->cmd_queue)
2839 actual_slots++;
2840
2841 txq->meta =
2842 kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
2843 txq->cmd =
2844 kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
2845
2846 if (!txq->meta || !txq->cmd)
2847 goto out_free_arrays;
2848
2849 len = sizeof(struct il_device_cmd);
2850 for (i = 0; i < actual_slots; i++) {
2851 /* only happens for cmd queue */
2852 if (i == slots_num)
2853 len = IL_MAX_CMD_SIZE;
2854
2855 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
2856 if (!txq->cmd[i])
2857 goto err;
2858 }
2859
2860 /* Alloc driver data array and TFD circular buffer */
2861 ret = il_tx_queue_alloc(il, txq, txq_id);
2862 if (ret)
2863 goto err;
2864
2865 txq->need_update = 0;
2866
2867 /*
2868 * For the default queues 0-3, set up the swq_id
2869 * already -- all others need to get one later
2870 * (if they need one at all).
2871 */
2872 if (txq_id < 4)
2873 il_set_swq_id(txq, txq_id, txq_id);
2874
2875 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
2876 * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
2877 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
2878
2879 /* Initialize queue's high/low-water marks, and head/tail idxes */
2880 il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
2881
2882 /* Tell device where to find queue */
2883 il->cfg->ops->lib->txq_init(il, txq);
2884
2885 return 0;
2886err:
2887 for (i = 0; i < actual_slots; i++)
2888 kfree(txq->cmd[i]);
2889out_free_arrays:
2890 kfree(txq->meta);
2891 kfree(txq->cmd);
2892
2893 return -ENOMEM;
2894}
2895EXPORT_SYMBOL(il_tx_queue_init);
2896
2897void
2898il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
2899 u32 txq_id)
2900{
2901 int actual_slots = slots_num;
2902
2903 if (txq_id == il->cmd_queue)
2904 actual_slots++;
2905
2906 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
2907
2908 txq->need_update = 0;
2909
2910 /* Initialize queue's high/low-water marks, and head/tail idxes */
2911 il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
2912
2913 /* Tell device where to find queue */
2914 il->cfg->ops->lib->txq_init(il, txq);
2915}
2916EXPORT_SYMBOL(il_tx_queue_reset);
2917
2918/*************** HOST COMMAND QUEUE FUNCTIONS *****/
2919
/**
 * il_enqueue_hcmd - enqueue a uCode command
 * @il: device private data point
 * @cmd: a point to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation is
 * failed. On success, it returns the idx (> 0) of command in the
 * command queue.
 */
int
il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
{
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	struct il_queue *q = &txq->q;
	struct il_device_cmd *out_cmd;
	struct il_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len;
	u32 idx;
	u16 fix_size;

	/* Let the hw-specific layer adjust the command length, then add
	 * the command header, which is DMA'd to the device as well. */
	cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
	 * we will need to increase the size of the TFD entries
	 * Also, check to see if command buffer should not exceed the size
	 * of device_cmd and max_cmd_size. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IL_MAX_CMD_SIZE);

	/* No commands while the radio is killed (by switch or thermal). */
	if (il_is_rfkill(il) || il_is_ctkill(il)) {
		IL_WARN("Not sending command - %s KILL\n",
			il_is_rfkill(il) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&il->hcmd_lock, flags);

	/* Async commands require one extra free slot. */
	if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&il->hcmd_lock, flags);

		IL_ERR("Restarting adapter due to command queue full\n");
		queue_work(il->workqueue, &il->restart);
		return -ENOSPC;
	}

	idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	/* A slot still mapped means a previous command never completed. */
	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
		spin_unlock_irqrestore(&il->hcmd_lock, flags);
		return -ENOSPC;
	}

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags | CMD_MAPPED;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
	    cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	/* NOTE(review): len is computed here but not used below --
	 * confirm whether it can be removed. */
	len = sizeof(struct il_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLEGACY_DEBUG
	switch (out_cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
			  "%d bytes at %d[%d]:%d\n",
			  il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
			  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			  q->write_ptr, idx, il->cmd_queue);
		break;
	default:
		D_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
		     idx, il->cmd_queue);
	}
#endif
	txq->need_update = 1;

	if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);

	/* Map header + payload for device access and remember the mapping
	 * so il_tx_cmd_complete()/il_cmd_queue_unmap() can undo it. */
	phys_addr =
	    pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
			   PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, fix_size);

	il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size,
						 1, U32_PAD(cmd->len));

	/* Increment and update queue's write idx */
	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
	il_txq_update_write_ptr(il, txq);

	spin_unlock_irqrestore(&il->hcmd_lock, flags);
	return idx;
}
3041
/**
 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' idx, all entries between old and new 'R' idx
 * need to be reclaimed. As result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void
il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
{
	/* NOTE(review): cmd_idx is currently unused in this function --
	 * confirm whether it can be dropped from the signature. */
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	int nfreed = 0;

	/* Reject indexes outside the ring or not currently in use. */
	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return;
	}

	/* Walk read_ptr forward until it passes idx.  Only one entry is
	 * expected per call; reclaiming more means responses were lost,
	 * so a firmware restart is scheduled. */
	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
			       q->write_ptr, q->read_ptr);
			queue_work(il->workqueue, &il->restart);
		}

	}
}
3074
/**
 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void
il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	int cmd_idx;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct il_device_cmd *cmd;
	struct il_cmd_meta *meta;
	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN
	    (txq_id != il->cmd_queue,
	     "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
	     txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
	     il->txq[il->cmd_queue].q.write_ptr)) {
		il_print_hex_error(il, pkt, 32);
		return;
	}

	/* Map the sequence idx to the slot holding the original command. */
	cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
	cmd = txq->cmd[cmd_idx];
	meta = &txq->meta[cmd_idx];

	/* Record last activity time for this queue. */
	txq->time_stamp = jiffies;

	/* Undo the mapping made in il_enqueue_hcmd(). */
	pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		/* Hand the response page over to the waiting caller. */
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(il, cmd, pkt);

	spin_lock_irqsave(&il->hcmd_lock, flags);

	il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);

	/* Synchronous command completed: wake any waiter on
	 * wait_command_queue. */
	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(S_HCMD_ACTIVE, &il->status);
		D_INFO("Clearing HCMD_ACTIVE for command %s\n",
		       il_get_cmd_string(cmd->hdr.cmd));
		wake_up(&il->wait_command_queue);
	}

	/* Mark as unmapped */
	meta->flags = 0;

	spin_unlock_irqrestore(&il->hcmd_lock, flags);
}
EXPORT_SYMBOL(il_tx_cmd_complete);
3142
/* Module metadata for the iwlegacy common core. */
MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
3147
/*
 * set bt_coex_active to true, uCode will do kill/defer
 * every time the priority line is asserted (BT is sending signals on the
 * priority line in the PCIx).
 * set bt_coex_active to false, uCode will ignore the BT activity and
 * perform the normal operation
 *
 * User might experience transmit issue on some platform due to WiFi/BT
 * co-exist problem. The possible behaviors are:
 *   Able to scan and finding all the available AP
 *   Not able to associate with any AP
 * On those platforms, WiFi communication can be restored by set
 * "bt_coex_active" module parameter to "false"
 *
 * default: bt_coex_active = true (BT_COEX_ENABLE)
 */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");

/* Runtime debug bitmask, exported for use by the hw-specific modules. */
u32 il_debug_level;
EXPORT_SYMBOL(il_debug_level);

/* All-ones broadcast MAC address. */
const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(il_bcast_addr);
3173
3174/* This function both allocates and initializes hw and il. */
3175struct ieee80211_hw *
3176il_alloc_all(struct il_cfg *cfg)
3177{
3178 struct il_priv *il;
3179 /* mac80211 allocates memory for this device instance, including
3180 * space for this driver's ilate structure */
3181 struct ieee80211_hw *hw;
3182
3183 hw = ieee80211_alloc_hw(sizeof(struct il_priv),
3184 cfg->ops->ieee80211_ops);
3185 if (hw == NULL) {
3186 pr_err("%s: Can not allocate network device\n", cfg->name);
3187 goto out;
3188 }
3189
3190 il = hw->priv;
3191 il->hw = hw;
3192
3193out:
3194 return hw;
3195}
3196EXPORT_SYMBOL(il_alloc_all);
3197
3198#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
3199#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
3200static void
3201il_init_ht_hw_capab(const struct il_priv *il,
3202 struct ieee80211_sta_ht_cap *ht_info,
3203 enum ieee80211_band band)
3204{
3205 u16 max_bit_rate = 0;
3206 u8 rx_chains_num = il->hw_params.rx_chains_num;
3207 u8 tx_chains_num = il->hw_params.tx_chains_num;
3208
3209 ht_info->cap = 0;
3210 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
3211
3212 ht_info->ht_supported = true;
3213
3214 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
3215 max_bit_rate = MAX_BIT_RATE_20_MHZ;
3216 if (il->hw_params.ht40_channel & BIT(band)) {
3217 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
3218 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
3219 ht_info->mcs.rx_mask[4] = 0x01;
3220 max_bit_rate = MAX_BIT_RATE_40_MHZ;
3221 }
3222
3223 if (il->cfg->mod_params->amsdu_size_8K)
3224 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
3225
3226 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3227 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
3228
3229 ht_info->mcs.rx_mask[0] = 0xFF;
3230 if (rx_chains_num >= 2)
3231 ht_info->mcs.rx_mask[1] = 0xFF;
3232 if (rx_chains_num >= 3)
3233 ht_info->mcs.rx_mask[2] = 0xFF;
3234
3235 /* Highest supported Rx data rate */
3236 max_bit_rate *= rx_chains_num;
3237 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
3238 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
3239
3240 /* Tx MCS capabilities */
3241 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
3242 if (tx_chains_num != rx_chains_num) {
3243 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
3244 ht_info->mcs.tx_params |=
3245 ((tx_chains_num -
3246 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
3247 }
3248}
3249
3250/**
3251 * il_init_geos - Initialize mac80211's geo/channel info based from eeprom
3252 */
3253int
3254il_init_geos(struct il_priv *il)
3255{
3256 struct il_channel_info *ch;
3257 struct ieee80211_supported_band *sband;
3258 struct ieee80211_channel *channels;
3259 struct ieee80211_channel *geo_ch;
3260 struct ieee80211_rate *rates;
3261 int i = 0;
3262 s8 max_tx_power = 0;
3263
3264 if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
3265 il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
3266 D_INFO("Geography modes already initialized.\n");
3267 set_bit(S_GEO_CONFIGURED, &il->status);
3268 return 0;
3269 }
3270
3271 channels =
3272 kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
3273 GFP_KERNEL);
3274 if (!channels)
3275 return -ENOMEM;
3276
3277 rates =
3278 kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
3279 GFP_KERNEL);
3280 if (!rates) {
3281 kfree(channels);
3282 return -ENOMEM;
3283 }
3284
3285 /* 5.2GHz channels start after the 2.4GHz channels */
3286 sband = &il->bands[IEEE80211_BAND_5GHZ];
3287 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
3288 /* just OFDM */
3289 sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
3290 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
3291
3292 if (il->cfg->sku & IL_SKU_N)
3293 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
3294
3295 sband = &il->bands[IEEE80211_BAND_2GHZ];
3296 sband->channels = channels;
3297 /* OFDM & CCK */
3298 sband->bitrates = rates;
3299 sband->n_bitrates = RATE_COUNT_LEGACY;
3300
3301 if (il->cfg->sku & IL_SKU_N)
3302 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
3303
3304 il->ieee_channels = channels;
3305 il->ieee_rates = rates;
3306
3307 for (i = 0; i < il->channel_count; i++) {
3308 ch = &il->channel_info[i];
3309
3310 if (!il_is_channel_valid(ch))
3311 continue;
3312
3313 sband = &il->bands[ch->band];
3314
3315 geo_ch = &sband->channels[sband->n_channels++];
3316
3317 geo_ch->center_freq =
3318 ieee80211_channel_to_frequency(ch->channel, ch->band);
3319 geo_ch->max_power = ch->max_power_avg;
3320 geo_ch->max_antenna_gain = 0xff;
3321 geo_ch->hw_value = ch->channel;
3322
3323 if (il_is_channel_valid(ch)) {
3324 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
3325 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
3326
3327 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
3328 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
3329
3330 if (ch->flags & EEPROM_CHANNEL_RADAR)
3331 geo_ch->flags |= IEEE80211_CHAN_RADAR;
3332
3333 geo_ch->flags |= ch->ht40_extension_channel;
3334
3335 if (ch->max_power_avg > max_tx_power)
3336 max_tx_power = ch->max_power_avg;
3337 } else {
3338 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
3339 }
3340
3341 D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
3342 geo_ch->center_freq,
3343 il_is_channel_a_band(ch) ? "5.2" : "2.4",
3344 geo_ch->
3345 flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
3346 geo_ch->flags);
3347 }
3348
3349 il->tx_power_device_lmt = max_tx_power;
3350 il->tx_power_user_lmt = max_tx_power;
3351 il->tx_power_next = max_tx_power;
3352
3353 if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
3354 (il->cfg->sku & IL_SKU_A)) {
3355 IL_INFO("Incorrectly detected BG card as ABG. "
3356 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
3357 il->pci_dev->device, il->pci_dev->subsystem_device);
3358 il->cfg->sku &= ~IL_SKU_A;
3359 }
3360
3361 IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
3362 il->bands[IEEE80211_BAND_2GHZ].n_channels,
3363 il->bands[IEEE80211_BAND_5GHZ].n_channels);
3364
3365 set_bit(S_GEO_CONFIGURED, &il->status);
3366
3367 return 0;
3368}
3369EXPORT_SYMBOL(il_init_geos);
3370
/*
 * il_free_geos - undo allocations in il_init_geos
 *
 * The per-band channel/bitrate pointers in il->bands point into these
 * two arrays, so freeing them releases everything il_init_geos built.
 */
void
il_free_geos(struct il_priv *il)
{
	kfree(il->ieee_channels);
	kfree(il->ieee_rates);
	clear_bit(S_GEO_CONFIGURED, &il->status);
}
EXPORT_SYMBOL(il_free_geos);
3382
3383static bool
3384il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
3385 u16 channel, u8 extension_chan_offset)
3386{
3387 const struct il_channel_info *ch_info;
3388
3389 ch_info = il_get_channel_info(il, band, channel);
3390 if (!il_is_channel_valid(ch_info))
3391 return false;
3392
3393 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3394 return !(ch_info->
3395 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
3396 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3397 return !(ch_info->
3398 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
3399
3400 return false;
3401}
3402
3403bool
3404il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
3405 struct ieee80211_sta_ht_cap *ht_cap)
3406{
3407 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
3408 return false;
3409
3410 /*
3411 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
3412 * the bit will not set if it is pure 40MHz case
3413 */
3414 if (ht_cap && !ht_cap->ht_supported)
3415 return false;
3416
3417#ifdef CONFIG_IWLEGACY_DEBUGFS
3418 if (il->disable_ht40)
3419 return false;
3420#endif
3421
3422 return il_is_channel_extension(il, il->band,
3423 le16_to_cpu(ctx->staging.channel),
3424 ctx->ht.extension_chan_offset);
3425}
3426EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3427
3428static u16
3429il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3430{
3431 u16 new_val;
3432 u16 beacon_factor;
3433
3434 /*
3435 * If mac80211 hasn't given us a beacon interval, program
3436 * the default into the device.
3437 */
3438 if (!beacon_val)
3439 return DEFAULT_BEACON_INTERVAL;
3440
3441 /*
3442 * If the beacon interval we obtained from the peer
3443 * is too large, we'll have to wake up more often
3444 * (and in IBSS case, we'll beacon too much)
3445 *
3446 * For example, if max_beacon_val is 4096, and the
3447 * requested beacon interval is 7000, we'll have to
3448 * use 3500 to be able to wake up on the beacons.
3449 *
3450 * This could badly influence beacon detection stats.
3451 */
3452
3453 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3454 new_val = beacon_val / beacon_factor;
3455
3456 if (!new_val)
3457 new_val = max_beacon_val;
3458
3459 return new_val;
3460}
3461
/*
 * il_send_rxon_timing - build and send the RXON timing command for @ctx
 *
 * Fills ctx->timing (timestamp, listen interval, beacon interval
 * clamped to the device maximum, beacon timer phase, dtim period) and
 * sends it synchronously via ctx->rxon_timing_cmd.  Caller must hold
 * il->mutex.  Returns the il_send_cmd_pdu() result.
 */
int
il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &il->hw->conf;

	lockdep_assert_held(&il->mutex);

	memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(il->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	/* no vif yet: 0 makes the adjust helper pick the default interval */
	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_win from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_win = 0;

	beacon_int =
	    il_adjust_beacon_interval(beacon_int,
				      il->hw_params.max_beacon_itrvl *
				      TIME_UNIT);
	ctx->timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = il->timestamp;	/* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	/* time from now until the next beacon-interval boundary */
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* dtim period defaults to 1 when unknown or unset */
	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;

	D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
		le16_to_cpu(ctx->timing.beacon_interval),
		le32_to_cpu(ctx->timing.beacon_init_val),
		le16_to_cpu(ctx->timing.atim_win));

	return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing),
			       &ctx->timing);
}
EXPORT_SYMBOL(il_send_rxon_timing);
3510
3511void
3512il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
3513 int hw_decrypt)
3514{
3515 struct il_rxon_cmd *rxon = &ctx->staging;
3516
3517 if (hw_decrypt)
3518 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3519 else
3520 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3521
3522}
3523EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3524
3525/* validate RXON structure is valid */
3526int
3527il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
3528{
3529 struct il_rxon_cmd *rxon = &ctx->staging;
3530 bool error = false;
3531
3532 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3533 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3534 IL_WARN("check 2.4G: wrong narrow\n");
3535 error = true;
3536 }
3537 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3538 IL_WARN("check 2.4G: wrong radar\n");
3539 error = true;
3540 }
3541 } else {
3542 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3543 IL_WARN("check 5.2G: not short slot!\n");
3544 error = true;
3545 }
3546 if (rxon->flags & RXON_FLG_CCK_MSK) {
3547 IL_WARN("check 5.2G: CCK!\n");
3548 error = true;
3549 }
3550 }
3551 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3552 IL_WARN("mac/bssid mcast!\n");
3553 error = true;
3554 }
3555
3556 /* make sure basic rates 6Mbps and 1Mbps are supported */
3557 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3558 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3559 IL_WARN("neither 1 nor 6 are basic\n");
3560 error = true;
3561 }
3562
3563 if (le16_to_cpu(rxon->assoc_id) > 2007) {
3564 IL_WARN("aid > 2007\n");
3565 error = true;
3566 }
3567
3568 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
3569 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3570 IL_WARN("CCK and short slot\n");
3571 error = true;
3572 }
3573
3574 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
3575 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3576 IL_WARN("CCK and auto detect");
3577 error = true;
3578 }
3579
3580 if ((rxon->
3581 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
3582 RXON_FLG_TGG_PROTECT_MSK) {
3583 IL_WARN("TGg but no auto-detect\n");
3584 error = true;
3585 }
3586
3587 if (error)
3588 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
3589
3590 if (error) {
3591 IL_ERR("Invalid RXON\n");
3592 return -EINVAL;
3593 }
3594 return 0;
3595}
3596EXPORT_SYMBOL(il_check_rxon_cmd);
3597
/**
 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @il: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
int
il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
{
	const struct il_rxon_cmd *staging = &ctx->staging;
	const struct il_rxon_cmd *active = &ctx->active;

/* CHK: a full RXON is required whenever the condition holds */
#define CHK(cond) \
	if ((cond)) { \
		D_INFO("need full RXON - " #cond "\n"); \
		return 1; \
	}

/* CHK_NEQ: a full RXON is required when the two fields differ */
#define CHK_NEQ(c1, c2) \
	if ((c1) != (c2)) { \
		D_INFO("need full RXON - " \
		       #c1 " != " #c2 " - %d != %d\n", \
		       (c1), (c2)); \
		return 1; \
	}

	/* These items are only settable from the full RXON command */
	CHK(!il_is_associated_ctx(ctx));
	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
	CHK(compare_ether_addr
	    (staging->wlap_bssid_addr, active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
EXPORT_SYMBOL(il_full_rxon_required);
3659
3660u8
3661il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
3662{
3663 /*
3664 * Assign the lowest rate -- should really get this from
3665 * the beacon skb from mac80211.
3666 */
3667 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
3668 return RATE_1M_PLCP;
3669 else
3670 return RATE_6M_PLCP;
3671}
3672EXPORT_SYMBOL(il_get_lowest_plcp);
3673
/*
 * _il_set_rxon_ht - translate the HT configuration into staging RXON flags
 *
 * Sets the HT protection mode and the 20/40 MHz channel-mode bits
 * (legacy, mixed or pure-40, plus control-channel location) in
 * ctx->staging, then invokes the device-specific set_rxon_chain hook
 * when one is provided.
 */
static void
_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
		struct il_rxon_context *ctx)
{
	struct il_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		/* HT disabled: clear every HT-related flag and bail out */
		rxon->flags &=
		    ~(RXON_FLG_CHANNEL_MODE_MSK |
		      RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
		      | RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |=
	    cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IL_ERR("invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

	D_ASSOC("rxon flags 0x%X operation mode :0x%X "
		"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
		ctx->ht.protection, ctx->ht.extension_chan_offset);
}
3740
/* Apply the HT configuration to the driver's (single) RXON context */
void
il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	_il_set_rxon_ht(il, ht_conf, &il->ctx);
}
EXPORT_SYMBOL(il_set_rxon_ht);
3747
3748/* Return valid, unused, channel for a passive scan to reset the RF */
3749u8
3750il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
3751{
3752 const struct il_channel_info *ch_info;
3753 int i;
3754 u8 channel = 0;
3755 u8 min, max;
3756
3757 if (band == IEEE80211_BAND_5GHZ) {
3758 min = 14;
3759 max = il->channel_count;
3760 } else {
3761 min = 0;
3762 max = 14;
3763 }
3764
3765 for (i = min; i < max; i++) {
3766 channel = il->channel_info[i].channel;
3767 if (channel == le16_to_cpu(il->ctx.staging.channel))
3768 continue;
3769
3770 ch_info = il_get_channel_info(il, band, channel);
3771 if (il_is_channel_valid(ch_info))
3772 break;
3773 }
3774
3775 return channel;
3776}
3777EXPORT_SYMBOL(il_get_single_channel_number);
3778
3779/**
3780 * il_set_rxon_channel - Set the band and channel values in staging RXON
3781 * @ch: requested channel as a pointer to struct ieee80211_channel
3782
3783 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
3784 * in the staging RXON flag structure based on the ch->band
3785 */
3786int
3787il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
3788 struct il_rxon_context *ctx)
3789{
3790 enum ieee80211_band band = ch->band;
3791 u16 channel = ch->hw_value;
3792
3793 if (le16_to_cpu(ctx->staging.channel) == channel && il->band == band)
3794 return 0;
3795
3796 ctx->staging.channel = cpu_to_le16(channel);
3797 if (band == IEEE80211_BAND_5GHZ)
3798 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3799 else
3800 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
3801
3802 il->band = band;
3803
3804 D_INFO("Staging channel set to %d [%d]\n", channel, band);
3805
3806 return 0;
3807}
3808EXPORT_SYMBOL(il_set_rxon_channel);
3809
3810void
3811il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
3812 enum ieee80211_band band, struct ieee80211_vif *vif)
3813{
3814 if (band == IEEE80211_BAND_5GHZ) {
3815 ctx->staging.flags &=
3816 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
3817 RXON_FLG_CCK_MSK);
3818 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3819 } else {
3820 /* Copied from il_post_associate() */
3821 if (vif && vif->bss_conf.use_short_slot)
3822 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3823 else
3824 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3825
3826 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
3827 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
3828 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
3829 }
3830}
3831EXPORT_SYMBOL(il_set_flags_for_band);
3832
/*
 * initialize rxon structure with default values from eeprom
 */
void
il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx)
{
	const struct il_channel_info *ch_info;

	memset(&ctx->staging, 0, sizeof(ctx->staging));

	/* device type and filter flags depend on the bound interface type */
	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else
		switch (ctx->vif->type) {

		case NL80211_IFTYPE_STATION:
			ctx->staging.dev_type = ctx->station_devtype;
			ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
			break;

		case NL80211_IFTYPE_ADHOC:
			ctx->staging.dev_type = ctx->ibss_devtype;
			ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
			ctx->staging.filter_flags =
			    RXON_FILTER_BCON_AWARE_MSK |
			    RXON_FILTER_ACCEPT_GRP_MSK;
			break;

		default:
			IL_ERR("Unsupported interface type %d\n",
			       ctx->vif->type);
			break;
		}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(il->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	/* keep the currently-active channel when known; otherwise fall
	 * back to the first eeprom channel */
	ch_info =
	    il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel));

	if (!ch_info)
		ch_info = &il->channel_info[0];

	ctx->staging.channel = cpu_to_le16(ch_info->channel);
	il->band = ch_info->band;

	il_set_flags_for_band(il, ctx, il->band, ctx->vif);

	ctx->staging.ofdm_basic_rates =
	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
	ctx->staging.cck_basic_rates =
	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(il_connection_init_rx_config);
3902
3903void
3904il_set_rate(struct il_priv *il)
3905{
3906 const struct ieee80211_supported_band *hw = NULL;
3907 struct ieee80211_rate *rate;
3908 int i;
3909
3910 hw = il_get_hw_mode(il, il->band);
3911 if (!hw) {
3912 IL_ERR("Failed to set rate: unable to get hw mode\n");
3913 return;
3914 }
3915
3916 il->active_rate = 0;
3917
3918 for (i = 0; i < hw->n_bitrates; i++) {
3919 rate = &(hw->bitrates[i]);
3920 if (rate->hw_value < RATE_COUNT_LEGACY)
3921 il->active_rate |= (1 << rate->hw_value);
3922 }
3923
3924 D_RATE("Set active_rate = %0x\n", il->active_rate);
3925
3926 il->ctx.staging.cck_basic_rates =
3927 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
3928
3929 il->ctx.staging.ofdm_basic_rates =
3930 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
3931}
3932EXPORT_SYMBOL(il_set_rate);
3933
3934void
3935il_chswitch_done(struct il_priv *il, bool is_success)
3936{
3937 struct il_rxon_context *ctx = &il->ctx;
3938
3939 if (test_bit(S_EXIT_PENDING, &il->status))
3940 return;
3941
3942 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
3943 ieee80211_chswitch_done(ctx->vif, is_success);
3944}
3945EXPORT_SYMBOL(il_chswitch_done);
3946
/*
 * il_hdl_csa - handle a channel-switch-announcement notification
 *
 * On success (zero status and the announced channel matches the
 * pending switch target) commit the channel to both active and staging
 * RXON and report success to mac80211; otherwise report failure.
 */
void
il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_csa_notification *csa = &(pkt->u.csa_notif);

	struct il_rxon_context *ctx = &il->ctx;
	/* NOTE(review): the (void *) cast presumably drops a const on
	 * ->active so it can be patched in place -- confirm in the header */
	struct il_rxon_cmd *rxon = (void *)&ctx->active;

	if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		return;

	if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
		rxon->channel = csa->channel;
		ctx->staging.channel = csa->channel;
		D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
		il_chswitch_done(il, true);
	} else {
		IL_ERR("CSA notif (fail) : channel %d\n",
		       le16_to_cpu(csa->channel));
		il_chswitch_done(il, false);
	}
}
EXPORT_SYMBOL(il_hdl_csa);
3971
#ifdef CONFIG_IWLEGACY_DEBUG
/* Dump the staging RXON command, field by field, at RADIO debug level */
void
il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
{
	struct il_rxon_cmd *rxon = &ctx->staging;

	D_RADIO("RX CONFIG:\n");
	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
#endif
/**
 * il_irq_handle_error - called for HW or SW error interrupt from card
 */
void
il_irq_handle_error(struct il_priv *il)
{
	/* Set the FW error flag -- cleared on il_down */
	set_bit(S_FW_ERROR, &il->status);

	/* Cancel currently queued command. */
	clear_bit(S_HCMD_ACTIVE, &il->status);

	IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);

	/* device-specific error log, plus FH register dump when provided */
	il->cfg->ops->lib->dump_nic_error_log(il);
	if (il->cfg->ops->lib->dump_fh)
		il->cfg->ops->lib->dump_fh(il, NULL, false);
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
		il_print_rx_config_cmd(il, &il->ctx);
#endif

	/* unblock anyone waiting on a command completion */
	wake_up(&il->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(S_READY, &il->status);

	if (!test_bit(S_EXIT_PENDING, &il->status)) {
		IL_DBG(IL_DL_FW_ERRORS,
		       "Restarting adapter due to uCode error.\n");

		/* restart only when the module parameter allows it */
		if (il->cfg->mod_params->restart_fw)
			queue_work(il->workqueue, &il->restart);
	}
}
EXPORT_SYMBOL(il_irq_handle_error);
4029
4030static int
4031il_apm_stop_master(struct il_priv *il)
4032{
4033 int ret = 0;
4034
4035 /* stop device's busmaster DMA activity */
4036 il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
4037
4038 ret =
4039 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
4040 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
4041 if (ret)
4042 IL_WARN("Master Disable Timed Out, 100 usec\n");
4043
4044 D_INFO("stop master\n");
4045
4046 return ret;
4047}
4048
/* Power down the card; undone by il_apm_init() */
void
il_apm_stop(struct il_priv *il)
{
	D_INFO("Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	il_apm_stop_master(il);

	/* Reset the entire device */
	il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* brief settle time after the software reset */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(il_apm_stop);
4069
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via il_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 *
 * Returns negative only when the MAC clock fails to stabilize; the
 * sequence of register writes below is order-sensitive (hardware
 * workarounds must precede the INIT_DONE transition).
 */
int
il_apm_init(struct il_priv *il)
{
	int ret = 0;
	u16 lctl;

	D_INFO("Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE: This is no-op for 3945 (non-existent bit)
	 */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 * costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 * power savings, even without L1.
	 */
	if (il->cfg->base_params->set_l0s) {
		lctl = il_pcie_link_ctl(il);
		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
		    PCI_CFG_LINK_CTRL_VAL_L1_EN) {
			/* L1-ASPM enabled; disable(!) L0S */
			il_set_bit(il, CSR_GIO_REG,
				   CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			il_clear_bit(il, CSR_GIO_REG,
				     CSR_GIO_REG_VAL_L0S_ENABLED);
			D_POWER("L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (il->cfg->base_params->pll_cfg_val)
		il_set_bit(il, CSR_ANA_PLL_CFG,
			   il->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. il_wr_prph()
	 * and accesses to uCode SRAM.
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		D_INFO("Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Boostrap State Machine) is only in 3945 and 4965.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (il->cfg->base_params->use_bsm)
		il_wr_prph(il, APMG_CLK_EN_REG,
			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(il_apm_init);
4182
/*
 * il_set_tx_power - set the user tx-power limit
 * @tx_power: requested limit in dBm (0 dBm == 1 mW)
 * @force:    apply even when unchanged or while a scan/RXON change is
 *            in flight
 *
 * Validates the request against [0, tx_power_device_lmt], records it
 * in tx_power_next, and (unless deferred) pushes it to the device via
 * the lib->send_tx_power hook.  Caller must hold il->mutex.
 * Returns 0 or a negative errno.
 */
int
il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct il_rxon_context *ctx = &il->ctx;

	lockdep_assert_held(&il->mutex);

	if (il->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!il->cfg->ops->lib->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm mean 1 milliwatt */
	if (tx_power < 0) {
		IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
		return -EINVAL;
	}

	if (tx_power > il->tx_power_device_lmt) {
		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
			tx_power, il->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!il_is_ready_rf(il))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always need to be updated for newest request */
	il->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(S_SCANNING, &il->status) ||
	    memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		D_INFO("Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = il->tx_power_user_lmt;
	il->tx_power_user_lmt = tx_power;

	ret = il->cfg->ops->lib->send_tx_power(il);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		il->tx_power_user_lmt = prev_tx_power;
		il->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(il_set_tx_power);
4239
4240void
4241il_send_bt_config(struct il_priv *il)
4242{
4243 struct il_bt_cmd bt_cmd = {
4244 .lead_time = BT_LEAD_TIME_DEF,
4245 .max_kill = BT_MAX_KILL_DEF,
4246 .kill_ack_mask = 0,
4247 .kill_cts_mask = 0,
4248 };
4249
4250 if (!bt_coex_active)
4251 bt_cmd.flags = BT_COEX_DISABLE;
4252 else
4253 bt_cmd.flags = BT_COEX_ENABLE;
4254
4255 D_INFO("BT coex %s\n",
4256 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
4257
4258 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4259 IL_ERR("failed to send BT Coex Config\n");
4260}
4261EXPORT_SYMBOL(il_send_bt_config);
4262
4263int
4264il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4265{
4266 struct il_stats_cmd stats_cmd = {
4267 .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
4268 };
4269
4270 if (flags & CMD_ASYNC)
4271 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4272 &stats_cmd, NULL);
4273 else
4274 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4275 &stats_cmd);
4276}
4277EXPORT_SYMBOL(il_send_stats_request);
4278
/* Log a PM sleep notification; compiled to a no-op without debug support */
void
il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
	D_RX("sleep mode: %d, src: %d\n",
	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(il_hdl_pm_sleep);
4290
4291void
4292il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
4293{
4294 struct il_rx_pkt *pkt = rxb_addr(rxb);
4295 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
4296 D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
4297 il_get_cmd_string(pkt->hdr.cmd));
4298 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
4299}
4300EXPORT_SYMBOL(il_hdl_pm_debug_stats);
4301
/*
 * il_hdl_error - handle an error reply notification from the uCode
 *
 * Logs the error type together with the offending host command's name,
 * id, sequence number and the extra error info word from the reply.
 */
void
il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
	       "seq 0x%04X ser 0x%08X\n",
	       le32_to_cpu(pkt->u.err_resp.error_type),
	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
	       pkt->u.err_resp.cmd_id,
	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
	       le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(il_hdl_error);
4316
/* Zero all interrupt counters kept in il->isr_stats. */
void
il_clear_isr_stats(struct il_priv *il)
{
	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
}
4322
/*
 * il_mac_conf_tx - mac80211 callback to set EDCA parameters for one AC
 *
 * Stores cw_min/cw_max/AIFS/TXOP for the given queue into the staged QoS
 * parameters under il->lock; the values are sent to the device later
 * (see il_update_qos).  Out-of-range queues return 0 (silently ignored)
 * so mac80211 does not treat them as a hard failure.
 */
int
il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
	       const struct ieee80211_tx_queue_params *params)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	int q;

	D_MAC80211("enter\n");

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	/* mac80211 queue 0 is highest priority; device AC order is reversed */
	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&il->lock, flags);

	il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
	    cpu_to_le16(params->cw_min);
	il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
	    cpu_to_le16(params->cw_max);
	il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
	/* mac80211 expresses txop in 32-usec units, hence the * 32 */
	il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
	    cpu_to_le16((params->txop * 32));

	il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	D_MAC80211("leave\n");
	return 0;
}
EXPORT_SYMBOL(il_mac_conf_tx);
4363
/*
 * il_mac_tx_last_beacon - mac80211 callback: did we send the last beacon?
 *
 * Reports whether this device is currently the IBSS beacon manager.
 */
int
il_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	return il->ibss_manager == IL_IBSS_MANAGER;
}
EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
4372
/*
 * il_set_mode - reprogram RXON for the context's current vif type
 *
 * Reinitializes the RX config for the context, recalculates the RX chain
 * selection when the hardware provides that hook, and commits the result
 * to the device.  Returns the il_commit_rxon() result.
 */
static int
il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
{
	il_connection_init_rx_config(il, ctx);

	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

	return il_commit_rxon(il, ctx);
}
4383
/*
 * il_setup_interface - program a newly attached vif into the device
 *
 * Records the vif type as the driver-wide iw_mode, marks the context
 * active and commits the corresponding RXON.  On failure the context is
 * deactivated again unless it is flagged always_active.
 *
 * Caller must hold il->mutex.
 */
static int
il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx)
{
	struct ieee80211_vif *vif = ctx->vif;
	int err;

	lockdep_assert_held(&il->mutex);

	/*
	 * This variable will be correct only when there's just
	 * a single context, but all code using it is for hardware
	 * that supports only one context.
	 */
	il->iw_mode = vif->type;

	ctx->is_active = true;

	err = il_set_mode(il, ctx);
	if (err) {
		if (!ctx->always_active)
			ctx->is_active = false;
		return err;
	}

	return 0;
}
4410
/*
 * il_mac_add_interface - mac80211 add_interface callback
 *
 * Validates that the single RXON context can accept the new vif (not
 * occupied by an exclusive mode, and the requested type is supported),
 * then binds the vif to the context and programs it into the device.
 * On setup failure the binding is rolled back and iw_mode falls back to
 * station mode.
 */
int
il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
	int err;
	u32 modes;

	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	mutex_lock(&il->mutex);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	/* check if busy context is exclusive */
	if (il->ctx.vif &&
	    (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) {
		err = -EINVAL;
		goto out;
	}

	/* the requested type must be in the supported-mode bitmaps */
	modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes;
	if (!(modes & BIT(vif->type))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = &il->ctx;
	il->ctx.vif = vif;

	err = il_setup_interface(il, &il->ctx);
	if (err) {
		/* roll back the binding so the context is reusable */
		il->ctx.vif = NULL;
		il->iw_mode = NL80211_IFTYPE_STATION;
	}

out:
	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");
	return err;
}
EXPORT_SYMBOL(il_mac_add_interface);
4458
/*
 * il_teardown_interface - undo il_setup_interface for a departing vif
 *
 * Cancels any scan the vif owns.  Unless this teardown is part of an
 * interface-type change (mode_change), the RXON is reprogrammed and the
 * context deactivated (unless flagged always_active).
 *
 * Caller must hold il->mutex.
 */
static void
il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
		      bool mode_change)
{
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&il->mutex);

	if (il->scan_vif == vif) {
		il_scan_cancel_timeout(il, 200);
		il_force_scan_end(il);
	}

	if (!mode_change) {
		il_set_mode(il, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}
}
4478
4479void
4480il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4481{
4482 struct il_priv *il = hw->priv;
4483 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
4484
4485 D_MAC80211("enter\n");
4486
4487 mutex_lock(&il->mutex);
4488
4489 WARN_ON(ctx->vif != vif);
4490 ctx->vif = NULL;
4491
4492 il_teardown_interface(il, vif, false);
4493
4494 memset(il->bssid, 0, ETH_ALEN);
4495 mutex_unlock(&il->mutex);
4496
4497 D_MAC80211("leave\n");
4498
4499}
4500EXPORT_SYMBOL(il_mac_remove_interface);
4501
4502int
4503il_alloc_txq_mem(struct il_priv *il)
4504{
4505 if (!il->txq)
4506 il->txq =
4507 kzalloc(sizeof(struct il_tx_queue) *
4508 il->cfg->base_params->num_of_queues, GFP_KERNEL);
4509 if (!il->txq) {
4510 IL_ERR("Not enough memory for txq\n");
4511 return -ENOMEM;
4512 }
4513 return 0;
4514}
4515EXPORT_SYMBOL(il_alloc_txq_mem);
4516
/* Free the TX queue array allocated by il_alloc_txq_mem(). */
void
il_txq_mem(struct il_priv *il)
{
	kfree(il->txq);
	il->txq = NULL;
}
EXPORT_SYMBOL(il_txq_mem);
4524
4525#ifdef CONFIG_IWLEGACY_DEBUGFS
4526
4527#define IL_TRAFFIC_DUMP_SIZE (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)
4528
4529void
4530il_reset_traffic_log(struct il_priv *il)
4531{
4532 il->tx_traffic_idx = 0;
4533 il->rx_traffic_idx = 0;
4534 if (il->tx_traffic)
4535 memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
4536 if (il->rx_traffic)
4537 memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
4538}
4539
4540int
4541il_alloc_traffic_mem(struct il_priv *il)
4542{
4543 u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;
4544
4545 if (il_debug_level & IL_DL_TX) {
4546 if (!il->tx_traffic) {
4547 il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL);
4548 if (!il->tx_traffic)
4549 return -ENOMEM;
4550 }
4551 }
4552 if (il_debug_level & IL_DL_RX) {
4553 if (!il->rx_traffic) {
4554 il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL);
4555 if (!il->rx_traffic)
4556 return -ENOMEM;
4557 }
4558 }
4559 il_reset_traffic_log(il);
4560 return 0;
4561}
4562EXPORT_SYMBOL(il_alloc_traffic_mem);
4563
4564void
4565il_free_traffic_mem(struct il_priv *il)
4566{
4567 kfree(il->tx_traffic);
4568 il->tx_traffic = NULL;
4569
4570 kfree(il->rx_traffic);
4571 il->rx_traffic = NULL;
4572}
4573EXPORT_SYMBOL(il_free_traffic_mem);
4574
4575void
4576il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
4577 struct ieee80211_hdr *header)
4578{
4579 __le16 fc;
4580 u16 len;
4581
4582 if (likely(!(il_debug_level & IL_DL_TX)))
4583 return;
4584
4585 if (!il->tx_traffic)
4586 return;
4587
4588 fc = header->frame_control;
4589 if (ieee80211_is_data(fc)) {
4590 len =
4591 (length >
4592 IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
4593 memcpy((il->tx_traffic +
4594 (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
4595 len);
4596 il->tx_traffic_idx =
4597 (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
4598 }
4599}
4600EXPORT_SYMBOL(il_dbg_log_tx_data_frame);
4601
4602void
4603il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
4604 struct ieee80211_hdr *header)
4605{
4606 __le16 fc;
4607 u16 len;
4608
4609 if (likely(!(il_debug_level & IL_DL_RX)))
4610 return;
4611
4612 if (!il->rx_traffic)
4613 return;
4614
4615 fc = header->frame_control;
4616 if (ieee80211_is_data(fc)) {
4617 len =
4618 (length >
4619 IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
4620 memcpy((il->rx_traffic +
4621 (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
4622 len);
4623 il->rx_traffic_idx =
4624 (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
4625 }
4626}
4627EXPORT_SYMBOL(il_dbg_log_rx_data_frame);
4628
/* Map a MANAGEMENT_* counter index to a printable name (IL_CMD expands
 * each enumerator to a case returning its stringified name). */
const char *
il_get_mgmt_string(int cmd)
{
	switch (cmd) {
		IL_CMD(MANAGEMENT_ASSOC_REQ);
		IL_CMD(MANAGEMENT_ASSOC_RESP);
		IL_CMD(MANAGEMENT_REASSOC_REQ);
		IL_CMD(MANAGEMENT_REASSOC_RESP);
		IL_CMD(MANAGEMENT_PROBE_REQ);
		IL_CMD(MANAGEMENT_PROBE_RESP);
		IL_CMD(MANAGEMENT_BEACON);
		IL_CMD(MANAGEMENT_ATIM);
		IL_CMD(MANAGEMENT_DISASSOC);
		IL_CMD(MANAGEMENT_AUTH);
		IL_CMD(MANAGEMENT_DEAUTH);
		IL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";

	}
}
4650
/* Map a CONTROL_* counter index to a printable name. */
const char *
il_get_ctrl_string(int cmd)
{
	switch (cmd) {
		IL_CMD(CONTROL_BACK_REQ);
		IL_CMD(CONTROL_BACK);
		IL_CMD(CONTROL_PSPOLL);
		IL_CMD(CONTROL_RTS);
		IL_CMD(CONTROL_CTS);
		IL_CMD(CONTROL_ACK);
		IL_CMD(CONTROL_CFEND);
		IL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";

	}
}
4668
4669void
4670il_clear_traffic_stats(struct il_priv *il)
4671{
4672 memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
4673 memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
4674}
4675
4676/*
4677 * if CONFIG_IWLEGACY_DEBUGFS defined,
4678 * il_update_stats function will
4679 * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass
4680 * Use debugFs to display the rx/rx_stats
4681 * if CONFIG_IWLEGACY_DEBUGFS not being defined, then no MGMT and CTRL
4682 * information will be recorded, but DATA pkt still will be recorded
4683 * for the reason of il_led.c need to control the led blinking based on
4684 * number of tx and rx data.
4685 *
4686 */
/*
 * il_update_stats - account one transmitted or received frame
 *
 * Classifies the frame by its frame-control field: management and
 * control subtypes increment the matching per-subtype counter, and
 * everything else is counted as data (data_cnt/data_bytes).
 */
void
il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	/* pick the counter block for the frame's direction */
	if (is_tx)
		stats = &il->tx_stats;
	else
		stats = &il->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
EXPORT_SYMBOL(il_update_stats);
4770#endif
4771
/*
 * il_force_reset - request a firmware reload
 *
 * @external: true when the request comes from outside the driver (e.g.
 *            debugfs); internal requests are rate-limited by
 *            reset_duration and gated on the fw_restart module parameter.
 *
 * Returns 0 if the reset was initiated (or deliberately skipped),
 * -EAGAIN if an internal request was rejected by the rate limiter,
 * -EINVAL if the driver is shutting down.
 */
int
il_force_reset(struct il_priv *il, bool external)
{
	struct il_force_reset *force_reset;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EINVAL;

	force_reset = &il->force_reset;
	force_reset->reset_request_count++;
	if (!external) {
		/* internal requests are throttled within reset_duration */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
			       force_reset->reset_duration, jiffies)) {
			D_INFO("force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;

	/*
	 * if the request is from external(ex: debugfs),
	 * then always perform the request in regardless the module
	 * parameter setting
	 * if the request is from internal (uCode error or driver
	 * detect failure), then fw_restart module parameter
	 * need to be check before performing firmware reload
	 */

	if (!external && !il->cfg->mod_params->restart_fw) {
		D_INFO("Cancel firmware reload based on "
		       "module parameter setting\n");
		return 0;
	}

	IL_ERR("On demand firmware reload\n");

	/* Set the FW error flag -- cleared on il_down */
	set_bit(S_FW_ERROR, &il->status);
	wake_up(&il->wait_command_queue);
	/*
	 * Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit
	 */
	clear_bit(S_READY, &il->status);
	queue_work(il->workqueue, &il->restart);

	return 0;
}
4823
/*
 * il_mac_change_interface - mac80211 change_interface callback
 *
 * Switches the existing vif to a new interface type without a full
 * remove/add cycle: validates the new type against the supported and
 * exclusive mode bitmaps, tears the interface down (preserving context
 * activity), flips the vif's type/p2p fields and sets it up again.
 * Device-side setup failures are masked (err forced to 0) so mac80211
 * stays in sync with our internal state.
 */
int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct il_priv *il = hw->priv;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
	u32 modes;
	int err;

	newtype = ieee80211_iftype_p2p(newtype, newp2p);

	mutex_lock(&il->mutex);

	if (!ctx->vif || !il_is_ready_rf(il)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	modes = ctx->interface_modes | ctx->exclusive_interface_modes;
	if (!(modes & BIT(newtype))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* neither the current nor the new type may be exclusive */
	if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) ||
	    (il->ctx.exclusive_interface_modes & BIT(newtype))) {
		err = -EINVAL;
		goto out;
	}

	/* success */
	il_teardown_interface(il, vif, true);
	vif->type = newtype;
	vif->p2p = newp2p;
	err = il_setup_interface(il, ctx);
	WARN_ON(err);
	/*
	 * We've switched internally, but submitting to the
	 * device may have failed for some reason. Mask this
	 * error, because otherwise mac80211 will not switch
	 * (and set the interface type back) and we'll be
	 * out of sync with it.
	 */
	err = 0;

out:
	mutex_unlock(&il->mutex);
	return err;
}
EXPORT_SYMBOL(il_mac_change_interface);
4878
4879/*
4880 * On every watchdog tick we check (latest) time stamp. If it does not
4881 * change during timeout period and queue is not empty we reset firmware.
4882 */
4883static int
4884il_check_stuck_queue(struct il_priv *il, int cnt)
4885{
4886 struct il_tx_queue *txq = &il->txq[cnt];
4887 struct il_queue *q = &txq->q;
4888 unsigned long timeout;
4889 int ret;
4890
4891 if (q->read_ptr == q->write_ptr) {
4892 txq->time_stamp = jiffies;
4893 return 0;
4894 }
4895
4896 timeout =
4897 txq->time_stamp +
4898 msecs_to_jiffies(il->cfg->base_params->wd_timeout);
4899
4900 if (time_after(jiffies, timeout)) {
4901 IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4902 il->cfg->base_params->wd_timeout);
4903 ret = il_force_reset(il, false);
4904 return (ret == -EAGAIN) ? 0 : 1;
4905 }
4906
4907 return 0;
4908}
4909
4910/*
4911 * Making watchdog tick be a quarter of timeout assure we will
4912 * discover the queue hung between timeout and 1.25*timeout
4913 */
4914#define IL_WD_TICK(timeout) ((timeout) / 4)
4915
4916/*
4917 * Watchdog timer callback, we check each tx queue for stuck, if if hung
4918 * we reset the firmware. If everything is fine just rearm the timer.
4919 */
4920void
4921il_bg_watchdog(unsigned long data)
4922{
4923 struct il_priv *il = (struct il_priv *)data;
4924 int cnt;
4925 unsigned long timeout;
4926
4927 if (test_bit(S_EXIT_PENDING, &il->status))
4928 return;
4929
4930 timeout = il->cfg->base_params->wd_timeout;
4931 if (timeout == 0)
4932 return;
4933
4934 /* monitor and check for stuck cmd queue */
4935 if (il_check_stuck_queue(il, il->cmd_queue))
4936 return;
4937
4938 /* monitor and check for other stuck queues */
4939 if (il_is_any_associated(il)) {
4940 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
4941 /* skip as we already checked the command queue */
4942 if (cnt == il->cmd_queue)
4943 continue;
4944 if (il_check_stuck_queue(il, cnt))
4945 return;
4946 }
4947 }
4948
4949 mod_timer(&il->watchdog,
4950 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4951}
4952EXPORT_SYMBOL(il_bg_watchdog);
4953
4954void
4955il_setup_watchdog(struct il_priv *il)
4956{
4957 unsigned int timeout = il->cfg->base_params->wd_timeout;
4958
4959 if (timeout)
4960 mod_timer(&il->watchdog,
4961 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4962 else
4963 del_timer(&il->watchdog);
4964}
4965EXPORT_SYMBOL(il_setup_watchdog);
4966
4967/*
4968 * extended beacon time format
4969 * time in usec will be changed into a 32-bit value in extended:internal format
4970 * the extended part is the beacon counts
4971 * the internal part is the time in usec within one beacon interval
4972 */
4973u32
4974il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
4975{
4976 u32 quot;
4977 u32 rem;
4978 u32 interval = beacon_interval * TIME_UNIT;
4979
4980 if (!interval || !usec)
4981 return 0;
4982
4983 quot =
4984 (usec /
4985 interval) & (il_beacon_time_mask_high(il,
4986 il->hw_params.
4987 beacon_time_tsf_bits) >> il->
4988 hw_params.beacon_time_tsf_bits);
4989 rem =
4990 (usec % interval) & il_beacon_time_mask_low(il,
4991 il->hw_params.
4992 beacon_time_tsf_bits);
4993
4994 return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
4995}
4996EXPORT_SYMBOL(il_usecs_to_beacons);
4997
/* base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down
 *
 * Adds two extended-beacon-time values (see il_usecs_to_beacons): the
 * high (beacon-count) parts are summed directly, and the low (usec)
 * parts are combined with an explicit carry into the count field when
 * they wrap past the beacon interval.
 */
__le32
il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
		   u32 beacon_interval)
{
	u32 base_low = base & il_beacon_time_mask_low(il,
						      il->hw_params.
						      beacon_time_tsf_bits);
	u32 addon_low = addon & il_beacon_time_mask_low(il,
							il->hw_params.
							beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	/* sum of the two beacon-count (high) parts */
	u32 res = (base & il_beacon_time_mask_high(il,
						   il->hw_params.
						   beacon_time_tsf_bits)) +
	    (addon & il_beacon_time_mask_high(il,
					      il->hw_params.
					      beacon_time_tsf_bits));

	/* combine the usec (low) parts, carrying into the count on wrap */
	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << il->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << il->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(il_add_beacon_time);
5030
5031#ifdef CONFIG_PM
5032
/* PCI PM suspend callback: make sure APM/DMA is stopped before the
 * system sleeps. */
int
il_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);

	/*
	 * This function is called when system goes into suspend state
	 * mac80211 will call il_mac_stop() from the mac80211 suspend function
	 * first but since il_mac_stop() has no knowledge of who the caller is,
	 * it will not call apm_ops.stop() to stop the DMA operation.
	 * Calling apm_ops.stop here to make sure we stop the DMA.
	 */
	il_apm_stop(il);

	return 0;
}
EXPORT_SYMBOL(il_pci_suspend);
5051
/* PCI PM resume callback: restore the PCI quirk, re-enable interrupts
 * and resample the hardware rfkill switch state. */
int
il_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);
	bool hw_rfkill = false;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	il_enable_interrupts(il);

	/* GP_CNTRL bit clear means the RF kill switch is engaged */
	if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(S_RF_KILL_HW, &il->status);
	else
		clear_bit(S_RF_KILL_HW, &il->status);

	/* let cfg80211/userspace know the current rfkill state */
	wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);

	return 0;
}
EXPORT_SYMBOL(il_pci_resume);
5080
/* Shared PM callbacks: every suspend-type transition stops DMA via
 * il_pci_suspend(), every resume-type transition goes through
 * il_pci_resume(). */
const struct dev_pm_ops il_pm_ops = {
	.suspend = il_pci_suspend,
	.resume = il_pci_resume,
	.freeze = il_pci_suspend,
	.thaw = il_pci_resume,
	.poweroff = il_pci_suspend,
	.restore = il_pci_resume,
};
EXPORT_SYMBOL(il_pm_ops);
5090
5091#endif /* CONFIG_PM */
5092
/*
 * il_update_qos - send the staged QoS (EDCA) parameters to the uCode
 *
 * No-op while shutting down or when the context is inactive.  Sets the
 * update-EDCA flag (and the TGN flag when HT is enabled) and submits
 * the QoS command asynchronously.
 */
static void
il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
{
	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (!ctx->is_active)
		return;

	ctx->qos_data.def_qos_parm.qos_flags = 0;

	if (ctx->qos_data.qos_active)
		ctx->qos_data.def_qos_parm.qos_flags |=
		    QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (ctx->ht.enabled)
		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
	      ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags);

	il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd),
			      &ctx->qos_data.def_qos_parm, NULL);
}
5117
5118/**
5119 * il_mac_config - mac80211 config callback
5120 */
5121int
5122il_mac_config(struct ieee80211_hw *hw, u32 changed)
5123{
5124 struct il_priv *il = hw->priv;
5125 const struct il_channel_info *ch_info;
5126 struct ieee80211_conf *conf = &hw->conf;
5127 struct ieee80211_channel *channel = conf->channel;
5128 struct il_ht_config *ht_conf = &il->current_ht_config;
5129 struct il_rxon_context *ctx = &il->ctx;
5130 unsigned long flags = 0;
5131 int ret = 0;
5132 u16 ch;
5133 int scan_active = 0;
5134 bool ht_changed = false;
5135
5136 if (WARN_ON(!il->cfg->ops->legacy))
5137 return -EOPNOTSUPP;
5138
5139 mutex_lock(&il->mutex);
5140
5141 D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value,
5142 changed);
5143
5144 if (unlikely(test_bit(S_SCANNING, &il->status))) {
5145 scan_active = 1;
5146 D_MAC80211("scan active\n");
5147 }
5148
5149 if (changed &
5150 (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
5151 /* mac80211 uses static for non-HT which is what we want */
5152 il->current_ht_config.smps = conf->smps_mode;
5153
5154 /*
5155 * Recalculate chain counts.
5156 *
5157 * If monitor mode is enabled then mac80211 will
5158 * set up the SM PS mode to OFF if an HT channel is
5159 * configured.
5160 */
5161 if (il->cfg->ops->hcmd->set_rxon_chain)
5162 il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
5163 }
5164
5165 /* during scanning mac80211 will delay channel setting until
5166 * scan finish with changed = 0
5167 */
5168 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
5169
5170 if (scan_active)
5171 goto set_ch_out;
5172
5173 ch = channel->hw_value;
5174 ch_info = il_get_channel_info(il, channel->band, ch);
5175 if (!il_is_channel_valid(ch_info)) {
5176 D_MAC80211("leave - invalid channel\n");
5177 ret = -EINVAL;
5178 goto set_ch_out;
5179 }
5180
5181 if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
5182 !il_is_channel_ibss(ch_info)) {
5183 D_MAC80211("leave - not IBSS channel\n");
5184 ret = -EINVAL;
5185 goto set_ch_out;
5186 }
5187
5188 spin_lock_irqsave(&il->lock, flags);
5189
5190 /* Configure HT40 channels */
5191 if (ctx->ht.enabled != conf_is_ht(conf)) {
5192 ctx->ht.enabled = conf_is_ht(conf);
5193 ht_changed = true;
5194 }
5195 if (ctx->ht.enabled) {
5196 if (conf_is_ht40_minus(conf)) {
5197 ctx->ht.extension_chan_offset =
5198 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
5199 ctx->ht.is_40mhz = true;
5200 } else if (conf_is_ht40_plus(conf)) {
5201 ctx->ht.extension_chan_offset =
5202 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
5203 ctx->ht.is_40mhz = true;
5204 } else {
5205 ctx->ht.extension_chan_offset =
5206 IEEE80211_HT_PARAM_CHA_SEC_NONE;
5207 ctx->ht.is_40mhz = false;
5208 }
5209 } else
5210 ctx->ht.is_40mhz = false;
5211
5212 /*
5213 * Default to no protection. Protection mode will
5214 * later be set from BSS config in il_ht_conf
5215 */
5216 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
5217
5218 /* if we are switching from ht to 2.4 clear flags
5219 * from any ht related info since 2.4 does not
5220 * support ht */
5221 if ((le16_to_cpu(ctx->staging.channel) != ch))
5222 ctx->staging.flags = 0;
5223
5224 il_set_rxon_channel(il, channel, ctx);
5225 il_set_rxon_ht(il, ht_conf);
5226
5227 il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
5228
5229 spin_unlock_irqrestore(&il->lock, flags);
5230
5231 if (il->cfg->ops->legacy->update_bcast_stations)
5232 ret = il->cfg->ops->legacy->update_bcast_stations(il);
5233
5234set_ch_out:
5235 /* The list of supported rates and rate mask can be different
5236 * for each band; since the band may have changed, reset
5237 * the rate mask to what mac80211 lists */
5238 il_set_rate(il);
5239 }
5240
5241 if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
5242 ret = il_power_update_mode(il, false);
5243 if (ret)
5244 D_MAC80211("Error setting sleep level\n");
5245 }
5246
5247 if (changed & IEEE80211_CONF_CHANGE_POWER) {
5248 D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
5249 conf->power_level);
5250
5251 il_set_tx_power(il, conf->power_level, false);
5252 }
5253
5254 if (!il_is_ready(il)) {
5255 D_MAC80211("leave - not ready\n");
5256 goto out;
5257 }
5258
5259 if (scan_active)
5260 goto out;
5261
5262 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
5263 il_commit_rxon(il, ctx);
5264 else
5265 D_INFO("Not re-sending same RXON configuration.\n");
5266 if (ht_changed)
5267 il_update_qos(il, ctx);
5268
5269out:
5270 D_MAC80211("leave\n");
5271 mutex_unlock(&il->mutex);
5272 return ret;
5273}
5274EXPORT_SYMBOL(il_mac_config);
5275
/*
 * il_mac_reset_tsf - mac80211 reset_tsf callback
 *
 * Used when (re)starting association: drops the cached HT config and
 * any IBSS beacon skb, cancels scanning, clears the ASSOC filter flag
 * from the staged RXON and commits it, then resets the rate mask.
 */
void
il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	struct il_rxon_context *ctx = &il->ctx;

	if (WARN_ON(!il->cfg->ops->legacy))
		return;

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	spin_lock_irqsave(&il->lock, flags);
	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
	spin_unlock_irqrestore(&il->lock, flags);

	spin_lock_irqsave(&il->lock, flags);

	/* new association get rid of ibss beacon skb */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = NULL;

	il->timestamp = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	il_scan_cancel_timeout(il, 100);
	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - not ready\n");
		mutex_unlock(&il->mutex);
		return;
	}

	/* we are restarting association process
	 * clear RXON_FILTER_ASSOC_MSK bit
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il, ctx);

	il_set_rate(il);

	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");
}
EXPORT_SYMBOL(il_mac_reset_tsf);
5325
/*
 * il_ht_conf - refresh HT state from the vif's BSS configuration
 *
 * Copies the protection mode and non-greenfield-STA flag from the HT
 * operation mode, and decides whether a single RX chain suffices based
 * on the peer's advertised MCS streams (STA mode) or unconditionally
 * (IBSS).  No-op when HT is disabled on the context.
 */
static void
il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_ht_config *ht_conf = &il->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);

	D_ASSOC("enter:\n");

	if (!ctx->ht.enabled)
		return;

	ctx->ht.protection =
	    bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	ctx->ht.non_gf_sta_present =
	    !!(bss_conf->
	       ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		/* look up the AP to inspect its HT capabilities */
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			/* max TX spatial streams advertised by the peer */
			maxstreams =
			    (ht_cap->mcs.
			     tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
			    >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			/* peer only receives single-stream MCS rates */
			if (ht_cap->mcs.rx_mask[1] == 0 &&
			    ht_cap->mcs.rx_mask[2] == 0)
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	D_ASSOC("leave\n");
}
5386
/* Clear the association state in the staged RXON and commit it. */
static inline void
il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);

	/*
	 * inform the ucode that there is no longer an
	 * association and that no more packets should be
	 * sent
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	ctx->staging.assoc_id = 0;
	il_commit_rxon(il, ctx);
}
5401
/*
 * il_beacon_update - fetch a fresh beacon from mac80211 and cache it
 *
 * Replaces il->beacon_skb with a newly built beacon, records the
 * beacon's TSF timestamp, and re-runs the post-associate hook so the
 * device picks up the new beacon.  Requires a beacon context; the skb
 * is freed on any early exit.  Caller must hold il->mutex.
 */
static void
il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	D_MAC80211("enter\n");

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_ctx) {
		IL_ERR("update beacon but no beacon context!\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&il->lock, flags);

	/* drop the previously cached beacon before storing the new one */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	il->timestamp = le64_to_cpu(timestamp);

	D_MAC80211("leave\n");
	spin_unlock_irqrestore(&il->lock, flags);

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return;
	}

	il->cfg->ops->legacy->post_associate(il);
}
5443
/*
 * il_mac_bss_info_changed - mac80211 bss_info_changed callback
 *
 * Applies the BSS configuration changes flagged in @changes (QoS,
 * beaconing, BSSID, ERP, HT, association state, IBSS membership) to the
 * staging RXON and to driver state, committing/refreshing the RXON where
 * needed.  Runs under il->mutex; individual staging updates that race
 * with the interrupt path additionally take il->lock.
 */
void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *bss_conf, u32 changes)
{
	struct il_priv *il = hw->priv;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
	int ret;

	/* only devices with legacy ops use this path */
	if (WARN_ON(!il->cfg->ops->legacy))
		return;

	D_MAC80211("changes = 0x%X\n", changes);

	mutex_lock(&il->mutex);

	/* uCode not running -- nothing can be applied */
	if (!il_is_alive(il)) {
		mutex_unlock(&il->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&il->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		il_update_qos(il, ctx);
		spin_unlock_irqrestore(&il->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * the add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			il->beacon_ctx = ctx;
		else
			il->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BSSID) {
		D_MAC80211("BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (il_scan_cancel_timeout(il, 100)) {
			IL_WARN("Aborted scan still in progress after 100ms\n");
			D_MAC80211("leaving - scan abort failed.\n");
			mutex_unlock(&il->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);

			/* currently needed in a few places */
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			/* disassociated: stop filtering on the old BSSID */
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		}

	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
		il_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG (11g) protection is meaningless on 5 GHz */
		if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from il_set_rate() and put something
		 * like this here:
		 *
		 if (A-band)
		 ctx->staging.ofdm_basic_rates =
		 bss_conf->basic_rates;
		 else
		 ctx->staging.ofdm_basic_rates =
		 bss_conf->basic_rates >> 4;
		 ctx->staging.cck_basic_rates =
		 bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		il_ht_conf(il, vif);

		if (il->cfg->ops->hcmd->set_rxon_chain)
			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			il->timestamp = bss_conf->timestamp;

			if (!il_is_rfkill(il))
				il->cfg->ops->legacy->post_associate(il);
		} else
			il_set_no_assoc(il, vif);
	}

	/* while associated, sync any staging changes to the active RXON */
	if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) {
		D_MAC80211("Changes (%#x) while associated\n", changes);
		ret = il_send_rxon_assoc(il, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active, &ctx->staging,
			       sizeof(struct il_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
			il->cfg->ops->legacy->config_ap(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret =
		    il->cfg->ops->legacy->manage_ibss_station(il, vif,
							      bss_conf->
							      ibss_joined);
		if (ret)
			IL_ERR("failed to %s IBSS station %pM\n",
			       bss_conf->ibss_joined ? "add" : "remove",
			       bss_conf->bssid);
	}

	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");
}
EXPORT_SYMBOL(il_mac_bss_info_changed);
5612
/*
 * il_isr - top-half interrupt handler (shared between 3945 and 4965)
 *
 * Masks device interrupts, reads the pending INTA/FH status registers,
 * and schedules the irq tasklet to do the real servicing.  Returns
 * IRQ_NONE when the interrupt was not ours (e.g. a shared IRQ line),
 * IRQ_HANDLED otherwise.  The register read/write order below is
 * deliberate: disable first (without clearing), then sample status.
 */
irqreturn_t
il_isr(int irq, void *data)
{
	struct il_priv *il = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!il)
		return IRQ_NONE;

	spin_lock_irqsave(&il->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	/* all-ones / 0xa5a5a5ax reads indicate the PCI device vanished */
	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
	      inta_fh);

	/* scheduler interrupt bit is not serviced here */
	inta &= ~CSR_INT_BIT_SCD;

	/* il_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(il_isr);
5673
5674/*
5675 * il_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
5676 * function.
5677 */
5678void
5679il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5680 __le16 fc, __le32 *tx_flags)
5681{
5682 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5683 *tx_flags |= TX_CMD_FLG_RTS_MSK;
5684 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5685 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5686
5687 if (!ieee80211_is_mgmt(fc))
5688 return;
5689
5690 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5691 case cpu_to_le16(IEEE80211_STYPE_AUTH):
5692 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5693 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5694 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5695 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5696 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5697 break;
5698 }
5699 } else if (info->control.rates[0].
5700 flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
5701 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5702 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5703 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5704 }
5705}
5706EXPORT_SYMBOL(il_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
new file mode 100644
index 000000000000..1bc0b02f559c
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -0,0 +1,3424 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#ifndef __il_core_h__
27#define __il_core_h__
28
29#include <linux/interrupt.h>
30#include <linux/pci.h> /* for struct pci_device_id */
31#include <linux/kernel.h>
32#include <linux/leds.h>
33#include <linux/wait.h>
34#include <net/mac80211.h>
35#include <net/ieee80211_radiotap.h>
36
37#include "commands.h"
38#include "csr.h"
39#include "prph.h"
40
41struct il_host_cmd;
42struct il_cmd;
43struct il_tx_queue;
44
45#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
46#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
47#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
48
49#define RX_QUEUE_SIZE 256
50#define RX_QUEUE_MASK 255
51#define RX_QUEUE_SIZE_LOG 8
52
53/*
54 * RX related structures and functions
55 */
56#define RX_FREE_BUFFERS 64
57#define RX_LOW_WATERMARK 8
58
59#define U32_PAD(n) ((4-(n))&0x3)
60
61/* CT-KILL constants */
62#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
63
64/* Default noise level to report when noise measurement is not available.
65 * This may be because we're:
66 * 1) Not associated (4965, no beacon stats being sent to driver)
67 * 2) Scanning (noise measurement does not apply to associated channel)
68 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
69 * Use default noise value of -127 ... this is below the range of measurable
70 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
71 * Also, -127 works better than 0 when averaging frames with/without
72 * noise info (e.g. averaging might be done in app); measured dBm values are
73 * always negative ... using a negative value as the default keeps all
74 * averages within an s8's (used in some apps) range of negative values. */
75#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
76
77/*
78 * RTS threshold here is total size [2347] minus 4 FCS bytes
79 * Per spec:
80 * a value of 0 means RTS on all data/management packets
81 * a value > max MSDU size means no RTS
82 * else RTS for data/management frames where MPDU is larger
83 * than RTS value.
84 */
85#define DEFAULT_RTS_THRESHOLD 2347U
86#define MIN_RTS_THRESHOLD 0U
87#define MAX_RTS_THRESHOLD 2347U
88#define MAX_MSDU_SIZE 2304U
89#define MAX_MPDU_SIZE 2346U
90#define DEFAULT_BEACON_INTERVAL 100U
91#define DEFAULT_SHORT_RETRY_LIMIT 7U
92#define DEFAULT_LONG_RETRY_LIMIT 4U
93
/* One receive buffer: a DMA-mapped page plus its free/used list linkage */
struct il_rx_buf {
	dma_addr_t page_dma;	/* bus address of the mapped page */
	struct page *page;	/* backing page for received data */
	struct list_head list;	/* entry in rx_free or rx_used */
};

/* kernel virtual address of a buffer's page */
#define rxb_addr(r) page_address(r->page)
101
102/* defined below */
103struct il_device_cmd;
104
/* Driver-side bookkeeping attached to each host command slot */
struct il_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct il_host_cmd *source;
	/*
	 * only for ASYNC commands
	 * (which is somewhat stupid -- look at common.c for instance
	 * which duplicates a bunch of code because the callback isn't
	 * invoked for SYNC commands, if it were and its result passed
	 * through it would be simpler...)
	 */
	void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
			  struct il_rx_pkt *pkt);

	/* The CMD_SIZE_HUGE flag bit indicates that the command
	 * structure is stored at the end of the shared queue memory. */
	u32 flags;

	/* DMA unmap bookkeeping for the command buffer */
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};
125
126/*
127 * Generic queue structure
128 *
129 * Contains common data for Rx and Tx queues
130 */
/* Circular-buffer indices and watermarks shared by Rx and Tx queues */
struct il_queue {
	int n_bd;		/* number of BDs in this queue */
	int write_ptr;		/* 1-st empty entry (idx) host_w */
	int read_ptr;		/* last used entry (idx) host_r */
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;	/* physical addr for BD's */
	int n_win;		/* safe queue win */
	u32 id;
	int low_mark;		/* low watermark, resume queue if free
				 * space more than this */
	int high_mark;		/* high watermark, stop queue if free
				 * space less than this */
};
144
/* One for each TFD: the skb being transmitted and its RXON context */
struct il_tx_info {
	struct sk_buff *skb;
	struct il_rxon_context *ctx;
};
150
151/**
152 * struct il_tx_queue - Tx Queue for DMA
153 * @q: generic Rx/Tx queue descriptor
154 * @bd: base of circular buffer of TFDs
155 * @cmd: array of command/TX buffer pointers
156 * @meta: array of meta data for each command/tx buffer
157 * @dma_addr_cmd: physical address of cmd/tx buffer array
158 * @txb: array of per-TFD driver data
159 * @time_stamp: time (in jiffies) of last read_ptr change
160 * @need_update: indicates need to update read/write idx
161 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
162 *
163 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
164 * descriptors) and required locking structures.
165 */
166#define TFD_TX_CMD_SLOTS 256
167#define TFD_CMD_SLOTS 32
168
169struct il_tx_queue {
170 struct il_queue q;
171 void *tfds;
172 struct il_device_cmd **cmd;
173 struct il_cmd_meta *meta;
174 struct il_tx_info *txb;
175 unsigned long time_stamp;
176 u8 need_update;
177 u8 sched_retry;
178 u8 active;
179 u8 swq_id;
180};
181
182/*
183 * EEPROM access time values:
184 *
185 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
186 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
187 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
188 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
189 */
190#define IL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
191
192#define IL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
193#define IL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
194
195/*
196 * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags.
197 *
198 * IBSS and/or AP operation is allowed *only* on those channels with
199 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
200 * RADAR detection is not supported by the 4965 driver, but is a
201 * requirement for establishing a new network for legal operation on channels
202 * requiring RADAR detection or restricting ACTIVE scanning.
203 *
204 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
205 * It only indicates that 20 MHz channel use is supported; HT40 channel
206 * usage is indicated by a separate set of regulatory flags for each
207 * HT40 channel pair.
208 *
209 * NOTE: Using a channel inappropriately will result in a uCode error!
210 */
211#define IL_NUM_TX_CALIB_GROUPS 5
212enum {
213 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
214 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
215 /* Bit 2 Reserved */
216 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
217 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
218 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
219 /* Bit 6 Reserved (was Narrow Channel) */
220 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
221};
222
223/* SKU Capabilities */
224/* 3945 only */
225#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
226#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
227
/* *regulatory* channel data format in eeprom, one for each channel.
 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
struct il_eeprom_channel {
	u8 flags;		/* EEPROM_CHANNEL_* flags copied from EEPROM */
	s8 max_power_avg;	/* max power (dBm) on this chnl, limit 31 */
} __packed;
234
235/* 3945 Specific */
236#define EEPROM_3945_EEPROM_VERSION (0x2f)
237
238/* 4965 has two radio transmitters (and 3 radio receivers) */
239#define EEPROM_TX_POWER_TX_CHAINS (2)
240
241/* 4965 has room for up to 8 sets of txpower calibration data */
242#define EEPROM_TX_POWER_BANDS (8)
243
244/* 4965 factory calibration measures txpower gain settings for
245 * each of 3 target output levels */
246#define EEPROM_TX_POWER_MEASUREMENTS (3)
247
248/* 4965 Specific */
249/* 4965 driver does not work with txpower calibration version < 5 */
250#define EEPROM_4965_TX_POWER_VERSION (5)
251#define EEPROM_4965_EEPROM_VERSION (0x2f)
252#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
253#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
254#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
255#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
256
257/* 2.4 GHz */
258extern const u8 il_eeprom_band_1[14];
259
260/*
261 * factory calibration data for one txpower level, on one channel,
262 * measured on one of the 2 tx chains (radio transmitter and associated
263 * antenna). EEPROM contains:
264 *
265 * 1) Temperature (degrees Celsius) of device when measurement was made.
266 *
267 * 2) Gain table idx used to achieve the target measurement power.
268 * This refers to the "well-known" gain tables (see 4965.h).
269 *
270 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
271 *
272 * 4) RF power amplifier detector level measurement (not used).
273 */
274struct il_eeprom_calib_measure {
275 u8 temperature; /* Device temperature (Celsius) */
276 u8 gain_idx; /* Index into gain table */
277 u8 actual_pow; /* Measured RF output power, half-dBm */
278 s8 pa_det; /* Power amp detector level (not used) */
279} __packed;
280
281/*
282 * measurement set for one channel. EEPROM contains:
283 *
284 * 1) Channel number measured
285 *
286 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
287 * (a.k.a. "tx chains") (6 measurements altogether)
288 */
289struct il_eeprom_calib_ch_info {
290 u8 ch_num;
291 struct il_eeprom_calib_measure
292 measurements[EEPROM_TX_POWER_TX_CHAINS]
293 [EEPROM_TX_POWER_MEASUREMENTS];
294} __packed;
295
296/*
297 * txpower subband info.
298 *
299 * For each frequency subband, EEPROM contains the following:
300 *
301 * 1) First and last channels within range of the subband. "0" values
302 * indicate that this sample set is not being used.
303 *
304 * 2) Sample measurement sets for 2 channels close to the range endpoints.
305 */
306struct il_eeprom_calib_subband_info {
307 u8 ch_from; /* channel number of lowest channel in subband */
308 u8 ch_to; /* channel number of highest channel in subband */
309 struct il_eeprom_calib_ch_info ch1;
310 struct il_eeprom_calib_ch_info ch2;
311} __packed;
312
313/*
314 * txpower calibration info. EEPROM contains:
315 *
316 * 1) Factory-measured saturation power levels (maximum levels at which
317 * tx power amplifier can output a signal without too much distortion).
318 * There is one level for 2.4 GHz band and one for 5 GHz band. These
319 * values apply to all channels within each of the bands.
320 *
321 * 2) Factory-measured power supply voltage level. This is assumed to be
322 * constant (i.e. same value applies to all channels/bands) while the
323 * factory measurements are being made.
324 *
325 * 3) Up to 8 sets of factory-measured txpower calibration values.
326 * These are for different frequency ranges, since txpower gain
327 * characteristics of the analog radio circuitry vary with frequency.
328 *
329 * Not all sets need to be filled with data;
330 * struct il_eeprom_calib_subband_info contains range of channels
331 * (0 if unused) for each set of data.
332 */
333struct il_eeprom_calib_info {
334 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
335 u8 saturation_power52; /* half-dBm */
336 __le16 voltage; /* signed */
337 struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
338} __packed;
339
340/* General */
341#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
342#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
343#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
344#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
345#define EEPROM_VERSION (2*0x44) /* 2 bytes */
346#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
347#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
348#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
349#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
350#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
351
352/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
353#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
354#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
355#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
356#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
357#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
358#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
359
360#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
361#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
362
363/*
364 * Per-channel regulatory data.
365 *
366 * Each channel that *might* be supported by iwl has a fixed location
367 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
368 * txpower (MSB).
369 *
370 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
371 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
372 *
373 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
374 */
375#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
376#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
377#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
378
379/*
380 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
381 * 5.0 GHz channels 7, 8, 11, 12, 16
382 * (4915-5080MHz) (none of these is ever supported)
383 */
384#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
385#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
386
387/*
388 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
389 * (5170-5320MHz)
390 */
391#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
392#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
393
394/*
395 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
396 * (5500-5700MHz)
397 */
398#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
399#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
400
401/*
402 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
403 * (5725-5825MHz)
404 */
405#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
406#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
407
408/*
409 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
410 *
411 * The channel listed is the center of the lower 20 MHz half of the channel.
412 * The overall center frequency is actually 2 channels (10 MHz) above that,
413 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
414 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
415 * and the overall HT40 channel width centers on channel 3.
416 *
417 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
418 * control channel to which to tune. RXON also specifies whether the
419 * control channel is the upper or lower half of a HT40 channel.
420 *
421 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
422 */
423#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
424
425/*
426 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
427 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
428 */
429#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
430
431#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
432
/* Per-device EEPROM access hooks and regulatory band offset table */
struct il_eeprom_ops {
	const u32 regulatory_bands[7];	/* EEPROM offsets of band channel tables */
	int (*acquire_semaphore) (struct il_priv *il);
	void (*release_semaphore) (struct il_priv *il);
};
438
439int il_eeprom_init(struct il_priv *il);
440void il_eeprom_free(struct il_priv *il);
441const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
442u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
443int il_init_channel_map(struct il_priv *il);
444void il_free_channel_map(struct il_priv *il);
445const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
446 enum ieee80211_band band,
447 u16 channel);
448
449#define IL_NUM_SCAN_RATES (2)
450
451struct il4965_channel_tgd_info {
452 u8 type;
453 s8 max_power;
454};
455
456struct il4965_channel_tgh_info {
457 s64 last_radar_time;
458};
459
460#define IL4965_MAX_RATE (33)
461
462struct il3945_clip_group {
463 /* maximum power level to prevent clipping for each rate, derived by
464 * us from this band's saturation power in EEPROM */
465 const s8 clip_powers[IL_MAX_RATES];
466};
467
468/* current Tx power values to use, one for each rate for each channel.
469 * requested power is limited by:
470 * -- regulatory EEPROM limits for this channel
471 * -- hardware capabilities (clip-powers)
472 * -- spectrum management
473 * -- user preference (e.g. iwconfig)
474 * when requested power is set, base power idx must also be set. */
475struct il3945_channel_power_info {
476 struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
477 s8 power_table_idx; /* actual (compenst'd) idx into gain table */
478 s8 base_power_idx; /* gain idx for power at factory temp. */
479 s8 requested_power; /* power (dBm) requested for this chnl/rate */
480};
481
482/* current scan Tx power values to use, one for each scan rate for each
483 * channel. */
484struct il3945_scan_power_info {
485 struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
486 s8 power_table_idx; /* actual (compenst'd) idx into gain table */
487 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
488};
489
490/*
491 * One for each channel, holds all channel setup data
492 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
493 * with one another!
494 */
495struct il_channel_info {
496 struct il4965_channel_tgd_info tgd;
497 struct il4965_channel_tgh_info tgh;
498 struct il_eeprom_channel eeprom; /* EEPROM regulatory limit */
499 struct il_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
500 * HT40 channel */
501
502 u8 channel; /* channel number */
503 u8 flags; /* flags copied from EEPROM */
504 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
505 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
506 s8 min_power; /* always 0 */
507 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
508
509 u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */
510 u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */
511 enum ieee80211_band band;
512
513 /* HT40 channel info */
514 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
515 u8 ht40_flags; /* flags copied from EEPROM */
516 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
517
518 /* Radio/DSP gain settings for each "normal" data Tx rate.
519 * These include, in addition to RF and DSP gain, a few fields for
520 * remembering/modifying gain settings (idxes). */
521 struct il3945_channel_power_info power_info[IL4965_MAX_RATE];
522
523 /* Radio/DSP gain settings for each scan rate, for directed scans. */
524 struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES];
525};
526
527#define IL_TX_FIFO_BK 0 /* shared */
528#define IL_TX_FIFO_BE 1
529#define IL_TX_FIFO_VI 2 /* shared */
530#define IL_TX_FIFO_VO 3
531#define IL_TX_FIFO_UNUSED -1
532
533/* Minimum number of queues. MAX_NUM is defined in hw specific files.
534 * Set the minimum to accommodate the 4 standard TX queues, 1 command
535 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
536#define IL_MIN_NUM_QUEUES 10
537
538#define IL_DEFAULT_CMD_QUEUE_NUM 4
539
540#define IEEE80211_DATA_LEN 2304
541#define IEEE80211_4ADDR_LEN 30
542#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
543#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
544
/* Host buffer for one outgoing frame (e.g. a beacon), list-linked for reuse */
struct il_frame {
	union {
		struct ieee80211_hdr frame;
		struct il_tx_beacon_cmd beacon;
		u8 raw[IEEE80211_FRAME_LEN];
		u8 cmd[360];
	} u;
	struct list_head list;	/* entry in the driver's free-frames list */
};
554
555#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
556#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
557#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
558
559enum {
560 CMD_SYNC = 0,
561 CMD_SIZE_NORMAL = 0,
562 CMD_NO_SKB = 0,
563 CMD_SIZE_HUGE = (1 << 0),
564 CMD_ASYNC = (1 << 1),
565 CMD_WANT_SKB = (1 << 2),
566 CMD_MAPPED = (1 << 3),
567};
568
569#define DEF_CMD_PAYLOAD_SIZE 320
570
571/**
572 * struct il_device_cmd
573 *
574 * For allocation of the command and tx queues, this establishes the overall
575 * size of the largest command we send to uCode, except for a scan command
576 * (which is relatively huge; space is allocated separately).
577 */
578struct il_device_cmd {
579 struct il_cmd_header hdr; /* uCode API */
580 union {
581 u32 flags;
582 u8 val8;
583 u16 val16;
584 u32 val32;
585 struct il_tx_cmd tx;
586 u8 payload[DEF_CMD_PAYLOAD_SIZE];
587 } __packed cmd;
588} __packed;
589
590#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
591
/* Caller-supplied description of a command to send to the uCode */
struct il_host_cmd {
	const void *data;	/* command payload */
	unsigned long reply_page;	/* reply buffer (set when CMD_WANT_SKB) */
	void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
			  struct il_rx_pkt *pkt);
	u32 flags;		/* CMD_* flags, see enum above */
	u16 len;		/* payload length in bytes */
	u8 id;			/* command id */
};
601
602#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
603#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
604#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
605
606/**
607 * struct il_rx_queue - Rx queue
608 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
609 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
610 * @read: Shared idx to newest available Rx buffer
611 * @write: Shared idx to oldest written Rx packet
612 * @free_count: Number of pre-allocated buffers in rx_free
613 * @rx_free: list of free SKBs for use
614 * @rx_used: List of Rx buffers with no SKB
615 * @need_update: flag to indicate we need to update read/write idx
616 * @rb_stts: driver's pointer to receive buffer status
617 * @rb_stts_dma: bus address of receive buffer status
618 *
619 * NOTE: rx_free and rx_used are used as a FIFO for il_rx_bufs
620 */
621struct il_rx_queue {
622 __le32 *bd;
623 dma_addr_t bd_dma;
624 struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
625 struct il_rx_buf *queue[RX_QUEUE_SIZE];
626 u32 read;
627 u32 write;
628 u32 free_count;
629 u32 write_actual;
630 struct list_head rx_free;
631 struct list_head rx_used;
632 int need_update;
633 struct il_rb_status *rb_stts;
634 dma_addr_t rb_stts_dma;
635 spinlock_t lock;
636};
637
638#define IL_SUPPORTED_RATES_IE_LEN 8
639
640#define MAX_TID_COUNT 9
641
642#define IL_INVALID_RATE 0xFF
643#define IL_INVALID_VALUE -1
644
645/**
646 * struct il_ht_agg -- aggregation status while waiting for block-ack
647 * @txq_id: Tx queue used for Tx attempt
648 * @frame_count: # frames attempted by Tx command
649 * @wait_for_ba: Expect block-ack before next Tx reply
650 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win
651 * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx win
652 * @bitmap1: High order, one bit for each frame pending ACK in Tx win
653 * @rate_n_flags: Rate at which Tx was attempted
654 *
655 * If C_TX indicates that aggregation was attempted, driver must wait
656 * for block ack (N_COMPRESSED_BA). This struct stores tx reply info
657 * until block ack arrives.
658 */
659struct il_ht_agg {
660 u16 txq_id;
661 u16 frame_count;
662 u16 wait_for_ba;
663 u16 start_idx;
664 u64 bitmap;
665 u32 rate_n_flags;
666#define IL_AGG_OFF 0
667#define IL_AGG_ON 1
668#define IL_EMPTYING_HW_QUEUE_ADDBA 2
669#define IL_EMPTYING_HW_QUEUE_DELBA 3
670 u8 state;
671};
672
/* Per-TID Tx state (sequence tracking and aggregation status) */
struct il_tid_data {
	u16 seq_number;		/* 4965 only */
	u16 tfds_in_queue;
	struct il_ht_agg agg;
};

/* Hardware crypto key material for one station */
struct il_hw_key {
	u32 cipher;		/* WLAN_CIPHER_SUITE_* value — confirm at callers */
	int keylen;
	u8 keyidx;
	u8 key[32];
};

/* HT SISO/MIMO rate bytes, addressable together as one u16 */
union il_ht_rate_supp {
	u16 rates;
	struct {
		u8 siso_rate;
		u8 mimo_rate;
	};
};
693
/* Maximal Rx A-MPDU length factor: 8K .. 64K bytes */
#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K

/*
 * Maximal MPDU density for TX aggregation
 * 4 - 2us density
 * 5 - 4us density
 * 6 - 8us density
 * 7 - 16us density
 */
#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
#define CFG_HT_MPDU_DENSITY_MIN (0x1)
716
/* Current HT configuration derived from peer capabilities */
struct il_ht_config {
	bool single_chain_sufficient;
	enum ieee80211_smps_mode smps;	/* current smps mode */
};

/* QoS structures */
struct il_qos_info {
	int qos_active;		/* nonzero when QoS parameters are in effect */
	struct il_qosparam_cmd def_qos_parm;
};
727
728/*
729 * Structure should be accessed with sta_lock held. When station addition
730 * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only
731 * the commands (il_addsta_cmd and il_link_quality_cmd) without
732 * sta_lock held.
733 */
734struct il_station_entry {
735 struct il_addsta_cmd sta;
736 struct il_tid_data tid[MAX_TID_COUNT];
737 u8 used, ctxid;
738 struct il_hw_key keyinfo;
739 struct il_link_quality_cmd *lq;
740};
741
742struct il_station_priv_common {
743 struct il_rxon_context *ctx;
744 u8 sta_id;
745};
746
747/**
748 * struct il_vif_priv - driver's ilate per-interface information
749 *
750 * When mac80211 allocates a virtual interface, it can allocate
751 * space for us to put data into.
752 */
753struct il_vif_priv {
754 struct il_rxon_context *ctx;
755 u8 ibss_bssid_sta_id;
756};
757
/* one for each uCode image (inst/data, boot/init/runtime) */
struct fw_desc {
	void *v_addr;		/* access by driver */
	dma_addr_t p_addr;	/* access by card's busmaster DMA */
	u32 len;		/* bytes */
};

/* uCode file layout */
struct il_ucode_header {
	__le32 ver;		/* major/minor/API/serial */
	struct {
		__le32 inst_size;	/* bytes of runtime code */
		__le32 data_size;	/* bytes of runtime data */
		__le32 init_size;	/* bytes of init code */
		__le32 init_data_size;	/* bytes of init data */
		__le32 boot_size;	/* bytes of bootstrap code */
		u8 data[0];	/* in same order as sizes */
	} v1;
};

/* Tracks last-seen fragment sequence per IBSS peer (keyed by MAC) */
struct il4965_ibss_seq {
	u8 mac[ETH_ALEN];
	u16 seq_num;
	u16 frag_num;
	unsigned long packet_time;	/* jiffies of last packet — confirm units */
	struct list_head list;
};
785
/*
 * Per-hardware bounds for the runtime sensitivity (auto-correlation /
 * energy threshold) calibration; filled in by each device's hw setup.
 */
struct il_sensitivity_ranges {
	u16 min_nrg_cck;
	u16 max_nrg_cck;

	u16 nrg_th_cck;
	u16 nrg_th_ofdm;

	u16 auto_corr_min_ofdm;
	u16 auto_corr_min_ofdm_mrc;
	u16 auto_corr_min_ofdm_x1;
	u16 auto_corr_min_ofdm_mrc_x1;

	u16 auto_corr_max_ofdm;
	u16 auto_corr_max_ofdm_mrc;
	u16 auto_corr_max_ofdm_x1;
	u16 auto_corr_max_ofdm_mrc_x1;

	u16 auto_corr_max_cck;
	u16 auto_corr_max_cck_mrc;
	u16 auto_corr_min_cck;
	u16 auto_corr_min_cck_mrc;

	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};

/* Integer temperature conversion (273, not 273.15 — intentional rounding) */
#define KELVIN_TO_CELSIUS(x) ((x)-273)
#define CELSIUS_TO_KELVIN(x) ((x)+273)
815
816/**
817 * struct il_hw_params
818 * @max_txq_num: Max # Tx queues supported
819 * @dma_chnl_num: Number of Tx DMA/FIFO channels
820 * @scd_bc_tbls_size: size of scheduler byte count tables
821 * @tfd_size: TFD size
822 * @tx/rx_chains_num: Number of TX/RX chains
823 * @valid_tx/rx_ant: usable antennas
824 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
825 * @max_rxq_log: Log-base-2 of max_rxq_size
826 * @rx_page_order: Rx buffer page order
827 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
828 * @max_stations:
829 * @ht40_channel: is 40MHz width possible in band 2.4
830 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
831 * @sw_crypto: 0 for hw, 1 for sw
832 * @max_xxx_size: for ucode uses
833 * @ct_kill_threshold: temperature threshold
834 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
835 * @struct il_sensitivity_ranges: range of sensitivity values
836 */
837struct il_hw_params {
838 u8 max_txq_num;
839 u8 dma_chnl_num;
840 u16 scd_bc_tbls_size;
841 u32 tfd_size;
842 u8 tx_chains_num;
843 u8 rx_chains_num;
844 u8 valid_tx_ant;
845 u8 valid_rx_ant;
846 u16 max_rxq_size;
847 u16 max_rxq_log;
848 u32 rx_page_order;
849 u32 rx_wrt_ptr_reg;
850 u8 max_stations;
851 u8 ht40_channel;
852 u8 max_beacon_itrvl; /* in 1024 ms */
853 u32 max_inst_size;
854 u32 max_data_size;
855 u32 max_bsm_size;
856 u32 ct_kill_threshold; /* value in hw-dependent units */
857 u16 beacon_time_tsf_bits;
858 const struct il_sensitivity_ranges *sens;
859};
860
861/******************************************************************************
862 *
863 * Functions implemented in core module which are forward declared here
864 * for use by iwl-[4-5].c
865 *
866 * NOTE: The implementation of these functions are not hardware specific
867 * which is why they are in the core module files.
868 *
869 * Naming convention --
870 * il_ <-- Is part of iwlwifi
871 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
872 * il4965_bg_ <-- Called from work queue context
873 * il4965_mac_ <-- mac80211 callback
874 *
875 ****************************************************************************/
876extern void il4965_update_chain_flags(struct il_priv *il);
877extern const u8 il_bcast_addr[ETH_ALEN];
878extern int il_queue_space(const struct il_queue *q);
879static inline int
880il_queue_used(const struct il_queue *q, int i)
881{
882 return q->write_ptr >= q->read_ptr ? (i >= q->read_ptr &&
883 i < q->write_ptr) : !(i <
884 q->read_ptr
885 && i >=
886 q->
887 write_ptr);
888}
889
890static inline u8
891il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
892{
893 /*
894 * This is for init calibration result and scan command which
895 * required buffer > TFD_MAX_PAYLOAD_SIZE,
896 * the big buffer at end of command array
897 */
898 if (is_huge)
899 return q->n_win; /* must be power of 2 */
900
901 /* Otherwise, use normal size buffers */
902 return idx & (q->n_win - 1);
903}
904
/* A coherent DMA allocation: CPU address, bus address and size */
struct il_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/* HT operation modes */
#define IL_OPERATION_MODE_AUTO 0
#define IL_OPERATION_MODE_HT_ONLY 1
#define IL_OPERATION_MODE_MIXED 2
#define IL_OPERATION_MODE_20MHZ 3

/* Per-frame overhead accounted in scheduler byte counts */
#define IL_TX_CRC_SIZE 4
#define IL_TX_DELIMITER_SIZE 4

#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000

/* Sensitivity and chain noise calibration */
#define INITIALIZATION_VALUE 0xFFFF
#define IL4965_CAL_NUM_BEACONS 20
#define IL_CAL_NUM_BEACONS 16
#define MAXIMUM_ALLOWED_PATHLOSS 15

#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3

/* False-alarm count bounds driving the auto-correlation adjustments */
#define MAX_FA_OFDM 50
#define MIN_FA_OFDM 5
#define MAX_FA_CCK 50
#define MIN_FA_CCK 5

#define AUTO_CORR_STEP_OFDM 1

#define AUTO_CORR_STEP_CCK 3
#define AUTO_CORR_MAX_TH_CCK 160

#define NRG_DIFF 2
#define NRG_STEP_CCK 2
#define NRG_MARGIN 8
#define MAX_NUMBER_CCK_NO_FA 100

#define AUTO_CORR_CCK_MIN_VAL_DEF (125)

/* Rx chain identifiers for chain-noise calibration */
#define CHAIN_A 0
#define CHAIN_B 1
#define CHAIN_C 2
#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
#define ALL_BAND_FILTER 0xFF00
#define IN_BAND_FILTER 0xFF
#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF

#define NRG_NUM_PREV_STAT_L 20
#define NUM_RX_CHAINS 3
956
/* Verdict of the false-alarm rate check in sensitivity calibration */
enum il4965_false_alarm_state {
	IL_FA_TOO_MANY = 0,
	IL_FA_TOO_FEW = 1,
	IL_FA_GOOD_RANGE = 2,
};

/* Chain-noise calibration state machine */
enum il4965_chain_noise_state {
	IL_CHAIN_NOISE_ALIVE = 0,	/* must be 0 */
	IL_CHAIN_NOISE_ACCUMULATE,
	IL_CHAIN_NOISE_CALIBRATED,
	IL_CHAIN_NOISE_DONE,
};

enum il4965_calib_enabled_state {
	IL_CALIB_DISABLED = 0,	/* must be 0 */
	IL_CALIB_ENABLED = 1,
};

/*
 * enum il_calib
 * defines the order in which results of initial calibrations
 * should be sent to the runtime uCode
 */
enum il_calib {
	IL_CALIB_MAX,
};

/* Opaque calibration results */
struct il_calib_result {
	void *buf;
	size_t buf_len;
};

/* Which uCode image is currently loaded */
enum ucode_type {
	UCODE_NONE = 0,
	UCODE_INIT,
	UCODE_RT
};
995
/* Sensitivity calib data */
struct il_sensitivity_data {
	/* current auto-correlation thresholds, tuned between the
	 * min/max bounds in struct il_sensitivity_ranges */
	u32 auto_corr_ofdm;
	u32 auto_corr_ofdm_mrc;
	u32 auto_corr_ofdm_x1;
	u32 auto_corr_ofdm_mrc_x1;
	u32 auto_corr_cck;
	u32 auto_corr_cck_mrc;

	/* counters from the previous calibration round, used to compute
	 * deltas for the current beacon period */
	u32 last_bad_plcp_cnt_ofdm;
	u32 last_fa_cnt_ofdm;
	u32 last_bad_plcp_cnt_cck;
	u32 last_fa_cnt_cck;

	u32 nrg_curr_state;
	u32 nrg_prev_state;
	u32 nrg_value[10];
	u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
	u32 nrg_silence_ref;
	u32 nrg_energy_idx;
	u32 nrg_silence_idx;
	u32 nrg_th_cck;
	s32 nrg_auto_corr_silence_diff;
	u32 num_in_cck_no_fa;
	u32 nrg_th_ofdm;

	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};
1026
/* Chain noise (differential Rx gain) calib data */
struct il_chain_noise_data {
	u32 active_chains;	/* bitmap of chains considered connected */
	u32 chain_noise_a;	/* accumulated noise/signal per chain */
	u32 chain_noise_b;
	u32 chain_noise_c;
	u32 chain_signal_a;
	u32 chain_signal_b;
	u32 chain_signal_c;
	u16 beacon_count;	/* beacons accumulated so far */
	u8 disconn_array[NUM_RX_CHAINS];
	u8 delta_gain_code[NUM_RX_CHAINS];
	u8 radio_write;		/* nonzero once gains written to radio */
	u8 state;		/* enum il4965_chain_noise_state */
};

#define EEPROM_SEM_TIMEOUT 10	/* milliseconds */
#define EEPROM_SEM_RETRY_LIMIT 1000	/* number of attempts (not time) */

/* debugfs traffic-log ring dimensions */
#define IL_TRAFFIC_ENTRIES (256)
#define IL_TRAFFIC_ENTRY_SIZE (64)

/* Spectrum-measurement status bits (il_priv.measurement_status) */
enum {
	MEASUREMENT_READY = (1 << 0),
	MEASUREMENT_ACTIVE = (1 << 1),
};
1053
/* interrupt stats */
struct isr_stats {
	u32 hw;			/* hardware error interrupts */
	u32 sw;			/* uCode software error interrupts */
	u32 err_code;		/* last sw error code reported */
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 handlers[IL_CN_MAX];	/* per-notification handler hits */
	u32 tx;
	u32 unhandled;
};

/* management stats */
enum il_mgmt_stats {
	MANAGEMENT_ASSOC_REQ = 0,
	MANAGEMENT_ASSOC_RESP,
	MANAGEMENT_REASSOC_REQ,
	MANAGEMENT_REASSOC_RESP,
	MANAGEMENT_PROBE_REQ,
	MANAGEMENT_PROBE_RESP,
	MANAGEMENT_BEACON,
	MANAGEMENT_ATIM,
	MANAGEMENT_DISASSOC,
	MANAGEMENT_AUTH,
	MANAGEMENT_DEAUTH,
	MANAGEMENT_ACTION,
	MANAGEMENT_MAX,
};
/* control stats */
enum il_ctrl_stats {
	CONTROL_BACK_REQ = 0,
	CONTROL_BACK,
	CONTROL_PSPOLL,
	CONTROL_RTS,
	CONTROL_CTS,
	CONTROL_ACK,
	CONTROL_CFEND,
	CONTROL_CFENDACK,
	CONTROL_MAX,
};

/* Per-direction frame counters, compiled in only for debugfs builds */
struct traffic_stats {
#ifdef CONFIG_IWLEGACY_DEBUGFS
	u32 mgmt[MANAGEMENT_MAX];
	u32 ctrl[CONTROL_MAX];
	u32 data_cnt;
	u64 data_bytes;
#endif
};
1107
1108/*
1109 * host interrupt timeout value
1110 * used with setting interrupt coalescing timer
1111 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
1112 *
1113 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
1114 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
1115 */
1116#define IL_HOST_INT_TIMEOUT_MAX (0xFF)
1117#define IL_HOST_INT_TIMEOUT_DEF (0x40)
1118#define IL_HOST_INT_TIMEOUT_MIN (0x0)
1119#define IL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
1120#define IL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
1121#define IL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
1122
1123#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
1124
1125/* TX queue watchdog timeouts in mSecs */
1126#define IL_DEF_WD_TIMEOUT (2000)
1127#define IL_LONG_WD_TIMEOUT (10000)
1128#define IL_MAX_WD_TIMEOUT (120000)
1129
1130struct il_force_reset {
1131 int reset_request_count;
1132 int reset_success_count;
1133 int reset_reject_count;
1134 unsigned long reset_duration;
1135 unsigned long last_force_reset_jiffies;
1136};
1137
1138/* extend beacon time format bit shifting */
1139/*
1140 * for _3945 devices
1141 * bits 31:24 - extended
1142 * bits 23:0 - interval
1143 */
1144#define IL3945_EXT_BEACON_TIME_POS 24
1145/*
1146 * for _4965 devices
1147 * bits 31:22 - extended
1148 * bits 21:0 - interval
1149 */
1150#define IL4965_EXT_BEACON_TIME_POS 22
1151
/*
 * RXON (receive-on) context: everything tied to one virtual interface's
 * view of the radio.  Only a single context exists in this driver (see
 * for_each_context below).
 */
struct il_rxon_context {
	struct ieee80211_vif *vif;

	const u8 *ac_to_fifo;	/* AC -> Tx FIFO mapping tables */
	const u8 *ac_to_queue;	/* AC -> Tx queue mapping tables */
	u8 mcast_queue;

	/*
	 * We could use the vif to indicate active, but we
	 * also need it to be active during disabling when
	 * we already removed the vif for type setting.
	 */
	bool always_active, is_active;

	bool ht_need_multiple_chains;

	int ctxid;

	u32 interface_modes, exclusive_interface_modes;
	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;

	/*
	 * We declare this const so it can only be
	 * changed via explicit cast within the
	 * routines that actually update the physical
	 * hardware.
	 */
	const struct il_rxon_cmd active;
	struct il_rxon_cmd staging;

	struct il_rxon_time_cmd timing;

	struct il_qos_info qos_data;

	u8 bcast_sta_id, ap_sta_id;

	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
	u8 qos_cmd;
	u8 wep_key_cmd;

	struct il_wep_key wep_keys[WEP_KEYS_MAX];
	u8 key_mapping_keys;

	__le32 station_flags;

	/* current HT protection/width state for this context */
	struct {
		bool non_gf_sta_present;
		u8 protection;
		bool enabled, is_40mhz;
		u8 extension_chan_offset;
	} ht;
};
1204
/* Power-save state: current and pending sleep commands */
struct il_power_mgr {
	struct il_powertable_cmd sleep_cmd;		/* last command sent */
	struct il_powertable_cmd sleep_cmd_next;	/* queued for next update */
	int debug_sleep_level_override;
	bool pci_pm;		/* PCI power management enabled */
};
1211
/*
 * struct il_priv - per-adapter driver state, shared by the 3945 and
 * 4965 drivers.  Device-specific state lives in the _3945/_4965 union
 * near the end.  Locking: 'lock' protects general shared data,
 * 'hcmd_lock' host commands, 'reg_lock' register access, 'sta_lock'
 * the station table (acquire 'lock' before 'sta_lock').
 */
struct il_priv {

	/* ieee device used by generic ieee processing code */
	struct ieee80211_hw *hw;
	struct ieee80211_channel *ieee_channels;
	struct ieee80211_rate *ieee_rates;
	struct il_cfg *cfg;

	/* temporary frame storage list */
	struct list_head free_frames;
	int frames_count;

	enum ieee80211_band band;
	int alloc_rxb_page;		/* count of outstanding Rx pages */

	/* notification dispatch table, indexed by notification id */
	void (*handlers[IL_CN_MAX]) (struct il_priv *il,
				     struct il_rx_buf *rxb);

	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];

	/* spectrum measurement report caching */
	struct il_spectrum_notification measure_report;
	u8 measurement_status;		/* MEASUREMENT_* bits */

	/* ucode beacon time */
	u32 ucode_beacon_time;
	int missed_beacon_threshold;

	/* track IBSS manager (last beacon) status */
	u32 ibss_manager;

	/* force reset */
	struct il_force_reset force_reset;

	/* we allocate array of il_channel_info for NIC's valid channels.
	 * Access via channel # using indirect idx array */
	struct il_channel_info *channel_info;	/* channel info array */
	u8 channel_count;	/* # of channels */

	/* thermal calibration */
	s32 temperature;	/* degrees Kelvin */
	s32 last_temperature;

	/* init calibration results */
	struct il_calib_result calib_results[IL_CALIB_MAX];

	/* Scan related variables */
	unsigned long scan_start;
	unsigned long scan_start_tsf;
	void *scan_cmd;
	enum ieee80211_band scan_band;
	struct cfg80211_scan_request *scan_request;
	struct ieee80211_vif *scan_vif;
	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
	u8 mgmt_tx_ant;

	/* spinlock */
	spinlock_t lock;	/* protect general shared data */
	spinlock_t hcmd_lock;	/* protect hcmd */
	spinlock_t reg_lock;	/* protect hw register access */
	struct mutex mutex;

	/* basic pci-network driver stuff */
	struct pci_dev *pci_dev;

	/* pci hardware address support */
	void __iomem *hw_base;
	u32 hw_rev;
	u32 hw_wa_rev;
	u8 rev_id;

	/* command queue number */
	u8 cmd_queue;

	/* max number of station keys */
	u8 sta_key_max_num;

	/* EEPROM MAC addresses */
	struct mac_address addresses[1];

	/* uCode images, save to reload in case of failure */
	int fw_idx;		/* firmware we're trying to load */
	u32 ucode_ver;		/* version of ucode, copy of
				   il_ucode.ver */
	struct fw_desc ucode_code;	/* runtime inst */
	struct fw_desc ucode_data;	/* runtime data original */
	struct fw_desc ucode_data_backup;	/* runtime data save/restore */
	struct fw_desc ucode_init;	/* initialization inst */
	struct fw_desc ucode_init_data;	/* initialization data */
	struct fw_desc ucode_boot;	/* bootstrap inst */
	enum ucode_type ucode_type;
	u8 ucode_write_complete;	/* the image write is complete */
	char firmware_name[25];

	struct il_rxon_context ctx;	/* the single RXON context */

	__le16 switch_channel;

	/* 1st responses from initialize and runtime uCode images.
	 * _4965's initialize alive response contains some calibration data. */
	struct il_init_alive_resp card_alive_init;
	struct il_alive_resp card_alive;

	u16 active_rate;

	u8 start_calib;
	struct il_sensitivity_data sensitivity_data;
	struct il_chain_noise_data chain_noise_data;
	__le16 sensitivity_tbl[HD_TBL_SIZE];

	struct il_ht_config current_ht_config;

	/* Rate scaling data */
	u8 retry_rate;

	wait_queue_head_t wait_command_queue;

	int activity_timer_active;

	/* Rx and Tx DMA processing queues */
	struct il_rx_queue rxq;
	struct il_tx_queue *txq;
	unsigned long txq_ctx_active_msk;	/* bit per active Tx queue */
	struct il_dma_ptr kw;	/* keep warm address */
	struct il_dma_ptr scd_bc_tbls;

	u32 scd_base_addr;	/* scheduler sram base address */

	unsigned long status;	/* S_* status bits */

	/* counts mgmt, ctl, and data packets */
	struct traffic_stats tx_stats;
	struct traffic_stats rx_stats;

	/* counts interrupts */
	struct isr_stats isr_stats;

	struct il_power_mgr power_data;

	/* context information */
	u8 bssid[ETH_ALEN];	/* used only on 3945 but filled by core */

	/* station table variables */

	/* Note: if lock and sta_lock are needed, lock must be acquired first */
	spinlock_t sta_lock;
	int num_stations;
	struct il_station_entry stations[IL_STATION_COUNT];
	unsigned long ucode_key_table;

	/* queue refcounts */
#define IL_MAX_HW_QUEUES 32
	unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
	/* for each AC */
	atomic_t queue_stop_count[4];

	/* Indication if ieee80211_ops->open has been called */
	u8 is_open;

	u8 mac80211_registered;

	/* eeprom -- this is in the card's little endian byte order */
	u8 *eeprom;
	struct il_eeprom_calib_info *calib_info;

	enum nl80211_iftype iw_mode;

	/* Last Rx'd beacon timestamp */
	u64 timestamp;

	/* device-family-specific state; exactly one arm is live per device */
	union {
#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
		struct {
			void *shared_virt;
			dma_addr_t shared_phys;

			struct delayed_work thermal_periodic;
			struct delayed_work rfkill_poll;

			struct il3945_notif_stats stats;
#ifdef CONFIG_IWLEGACY_DEBUGFS
			struct il3945_notif_stats accum_stats;
			struct il3945_notif_stats delta_stats;
			struct il3945_notif_stats max_delta;
#endif

			u32 sta_supp_rates;
			int last_rx_rssi;	/* From Rx packet stats */

			/* Rx'd packet timing information */
			u32 last_beacon_time;
			u64 last_tsf;

			/*
			 * each calibration channel group in the
			 * EEPROM has a derived clip setting for
			 * each rate.
			 */
			const struct il3945_clip_group clip_groups[5];

		} _3945;
#endif
#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
		struct {
			struct il_rx_phy_res last_phy_res;
			bool last_phy_res_valid;

			struct completion firmware_loading_complete;

			/*
			 * chain noise reset and gain commands are the
			 * two extra calibration commands follows the standard
			 * phy calibration commands
			 */
			u8 phy_calib_chain_noise_reset_cmd;
			u8 phy_calib_chain_noise_gain_cmd;

			struct il_notif_stats stats;
#ifdef CONFIG_IWLEGACY_DEBUGFS
			struct il_notif_stats accum_stats;
			struct il_notif_stats delta_stats;
			struct il_notif_stats max_delta;
#endif

		} _4965;
#endif
	};

	struct il_hw_params hw_params;

	u32 inta_mask;		/* interrupt mask currently enabled */

	struct workqueue_struct *workqueue;

	struct work_struct restart;
	struct work_struct scan_completed;
	struct work_struct rx_replenish;
	struct work_struct abort_scan;

	struct il_rxon_context *beacon_ctx;
	struct sk_buff *beacon_skb;

	struct work_struct tx_flush;

	struct tasklet_struct irq_tasklet;

	struct delayed_work init_alive_start;
	struct delayed_work alive_start;
	struct delayed_work scan_check;

	/* TX Power */
	s8 tx_power_user_lmt;
	s8 tx_power_device_lmt;
	s8 tx_power_next;

#ifdef CONFIG_IWLEGACY_DEBUG
	/* debugging info */
	u32 debug_level;	/* per device debugging will override global
				   il_debug_level if set */
#endif /* CONFIG_IWLEGACY_DEBUG */
#ifdef CONFIG_IWLEGACY_DEBUGFS
	/* debugfs */
	u16 tx_traffic_idx;
	u16 rx_traffic_idx;
	u8 *tx_traffic;
	u8 *rx_traffic;
	struct dentry *debugfs_dir;
	u32 dbgfs_sram_offset, dbgfs_sram_len;
	bool disable_ht40;
#endif /* CONFIG_IWLEGACY_DEBUGFS */

	struct work_struct txpower_work;
	u32 disable_sens_cal;
	u32 disable_chain_noise_cal;
	u32 disable_tx_power_cal;
	struct work_struct run_time_calib_work;
	struct timer_list stats_periodic;
	struct timer_list watchdog;
	bool hw_ready;

	struct led_classdev led;
	unsigned long blink_on, blink_off;
	bool led_registered;
};				/*il_priv */
1496
/* Mark Tx queue @txq_id active in the context bitmap (atomic set_bit) */
static inline void
il_txq_ctx_activate(struct il_priv *il, int txq_id)
{
	set_bit(txq_id, &il->txq_ctx_active_msk);
}

/* Clear Tx queue @txq_id from the active-context bitmap */
static inline void
il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
{
	clear_bit(txq_id, &il->txq_ctx_active_msk);
}
1508
1509static inline struct ieee80211_hdr *
1510il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx)
1511{
1512 if (il->txq[txq_id].txb[idx].skb)
1513 return (struct ieee80211_hdr *)il->txq[txq_id].txb[idx].skb->
1514 data;
1515 return NULL;
1516}
1517
/* Fetch the RXON context stored in a vif's driver-private area */
static inline struct il_rxon_context *
il_rxon_ctx_from_vif(struct ieee80211_vif *vif)
{
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;

	return vif_priv->ctx;
}

/* Only one context exists; the loop body runs exactly once */
#define for_each_context(il, _ctx)				\
	for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++)
1528
1529static inline int
1530il_is_associated(struct il_priv *il)
1531{
1532 return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1533}
1534
1535static inline int
1536il_is_any_associated(struct il_priv *il)
1537{
1538 return il_is_associated(il);
1539}
1540
1541static inline int
1542il_is_associated_ctx(struct il_rxon_context *ctx)
1543{
1544 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1545}
1546
1547static inline int
1548il_is_channel_valid(const struct il_channel_info *ch_info)
1549{
1550 if (ch_info == NULL)
1551 return 0;
1552 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1553}
1554
1555static inline int
1556il_is_channel_radar(const struct il_channel_info *ch_info)
1557{
1558 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1559}
1560
1561static inline u8
1562il_is_channel_a_band(const struct il_channel_info *ch_info)
1563{
1564 return ch_info->band == IEEE80211_BAND_5GHZ;
1565}
1566
1567static inline int
1568il_is_channel_passive(const struct il_channel_info *ch)
1569{
1570 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1571}
1572
1573static inline int
1574il_is_channel_ibss(const struct il_channel_info *ch)
1575{
1576 return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
1577}
1578
/* Free an Rx page (struct page form) and decrement the outstanding count */
static inline void
__il_free_pages(struct il_priv *il, struct page *page)
{
	__free_pages(page, il->hw_params.rx_page_order);
	il->alloc_rxb_page--;
}

/* Free an Rx page (virtual-address form) and decrement the count */
static inline void
il_free_pages(struct il_priv *il, unsigned long page)
{
	free_pages(page, il->hw_params.rx_page_order);
	il->alloc_rxb_page--;
}
1592
/* Driver identity strings */
#define IWLWIFI_VERSION "in-tree:"
#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"

/* Shorthand for a PCI device-table entry carrying an il_cfg pointer */
#define IL_PCI_DEVICE(dev, subdev, cfg) \
	.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
	.driver_data = (kernel_ulong_t)&(cfg)

#define TIME_UNIT 1024

/* Device capability (SKU) bits */
#define IL_SKU_G 0x1
#define IL_SKU_A 0x2
#define IL_SKU_N 0x8

/* Helper for stringifying command ids in switch statements */
#define IL_CMD(x) case x: return #x

/* Size of one Rx buffer in host DRAM */
#define IL_RX_BUF_SIZE_3K (3 * 1000)	/* 3945 only */
#define IL_RX_BUF_SIZE_4K (4 * 1024)
#define IL_RX_BUF_SIZE_8K (8 * 1024)
1614
/* Device-specific host-command callbacks (RXON handling) */
struct il_hcmd_ops {
	int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx);
	int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx);
	void (*set_rxon_chain) (struct il_priv *il,
				struct il_rxon_context *ctx);
};

/* Device-specific helpers for building/sizing host commands and scanning */
struct il_hcmd_utils_ops {
	u16 (*get_hcmd_size) (u8 cmd_id, u16 len);
	u16 (*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
	int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
	void (*post_scan) (struct il_priv *il);
};

/* Adaptive power management (APM) hooks */
struct il_apm_ops {
	int (*init) (struct il_priv *il);
	void (*config) (struct il_priv *il);
};

#ifdef CONFIG_IWLEGACY_DEBUGFS
/* debugfs read handlers for device-specific statistics files */
struct il_debugfs_ops {
	ssize_t (*rx_stats_read) (struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos);
	ssize_t (*tx_stats_read) (struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos);
	ssize_t (*general_stats_read) (struct file *file,
				       char __user *user_buf, size_t count,
				       loff_t *ppos);
};
#endif

/* Temperature-measurement hook */
struct il_temp_ops {
	void (*temperature) (struct il_priv *il);
};
1649
/* Device-specific library callbacks (Tx path, ucode load, error dump) */
struct il_lib_ops {
	/* set hw dependent parameters */
	int (*set_hw_params) (struct il_priv *il);
	/* Handling TX */
	void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
					 struct il_tx_queue *txq,
					 u16 byte_cnt);
	int (*txq_attach_buf_to_tfd) (struct il_priv *il,
				      struct il_tx_queue *txq, dma_addr_t addr,
				      u16 len, u8 reset, u8 pad);
	void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
	int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
	/* setup Rx handler */
	void (*handler_setup) (struct il_priv *il);
	/* alive notification after init uCode load */
	void (*init_alive_start) (struct il_priv *il);
	/* check validity of rtc data address */
	int (*is_valid_rtc_data_addr) (u32 addr);
	/* 1st ucode load */
	int (*load_ucode) (struct il_priv *il);

	void (*dump_nic_error_log) (struct il_priv *il);
	int (*dump_fh) (struct il_priv *il, char **buf, bool display);
	int (*set_channel_switch) (struct il_priv *il,
				   struct ieee80211_channel_switch *ch_switch);
	/* power management */
	struct il_apm_ops apm_ops;

	/* power */
	int (*send_tx_power) (struct il_priv *il);
	void (*update_chain_flags) (struct il_priv *il);

	/* eeprom operations */
	struct il_eeprom_ops eeprom_ops;

	/* temperature */
	struct il_temp_ops temp_ops;

#ifdef CONFIG_IWLEGACY_DEBUGFS
	struct il_debugfs_ops debugfs_ops;
#endif

};

/* LED command hook */
struct il_led_ops {
	int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
};

/* Hooks implemented differently by the 3945 and 4965 drivers */
struct il_legacy_ops {
	void (*post_associate) (struct il_priv *il);
	void (*config_ap) (struct il_priv *il);
	/* station management */
	int (*update_bcast_stations) (struct il_priv *il);
	int (*manage_ibss_station) (struct il_priv *il,
				    struct ieee80211_vif *vif, bool add);
};

/* Top-level bundle of all device-specific operation tables */
struct il_ops {
	const struct il_lib_ops *lib;
	const struct il_hcmd_ops *hcmd;
	const struct il_hcmd_utils_ops *utils;
	const struct il_led_ops *led;
	const struct il_nic_ops *nic;
	const struct il_legacy_ops *legacy;
	const struct ieee80211_ops *ieee80211_ops;
};
1716
/* Values settable from the modprobe command line */
struct il_mod_params {
	int sw_crypto;		/* def: 0 = using hardware encryption */
	int disable_hw_scan;	/* def: 0 = use h/w scan */
	int num_of_queues;	/* def: HW dependent */
	int disable_11n;	/* def: 0 = 11n capabilities enabled */
	int amsdu_size_8K;	/* def: 1 = enable 8K amsdu size */
	int antenna;		/* def: 0 = both antennas (use diversity) */
	int restart_fw;		/* def: 1 = restart firmware */
};

/*
 * @led_compensation: compensate on the led on/off time per HW according
 *	to the deviation to achieve the desired led frequency.
 *	The detail algorithm is described in common.c
 * @chain_noise_num_beacons: number of beacons used to compute chain noise
 * @wd_timeout: TX queues watchdog timeout
 * @temperature_kelvin: temperature report by uCode in kelvin
 * @ucode_tracing: support ucode continuous tracing
 * @sensitivity_calib_by_driver: driver has the capability to perform
 *	sensitivity calibration operation
 * @chain_noise_calib_by_driver: driver has the capability to perform
 *	chain noise calibration operation
 */
struct il_base_params {
	int eeprom_size;
	int num_of_queues;	/* def: HW dependent */
	int num_of_ampdu_queues;	/* def: HW dependent */
	/* for il_apm_init() */
	u32 pll_cfg_val;
	bool set_l0s;
	bool use_bsm;

	u16 led_compensation;
	int chain_noise_num_beacons;
	unsigned int wd_timeout;
	bool temperature_kelvin;
	const bool ucode_tracing;
	const bool sensitivity_calib_by_driver;
	const bool chain_noise_calib_by_driver;
};
1757
/* LED blink table constants */
#define IL_LED_SOLID 11
#define IL_DEF_LED_INTRVL cpu_to_le32(1000)

/* LED id bits used in the LED command */
#define IL_LED_ACTIVITY (0<<1)
#define IL_LED_LINK (1<<1)

/*
 * LED mode
 * IL_LED_DEFAULT:  use device default
 * IL_LED_RF_STATE: turn LED on/off based on RF state
 *		LED ON  = RF ON
 *		LED OFF = RF OFF
 * IL_LED_BLINK:    adjust led blink rate based on blink table
 */
enum il_led_mode {
	IL_LED_DEFAULT,
	IL_LED_RF_STATE,
	IL_LED_BLINK,
};

/* Register/unregister the LED class device for this adapter */
void il_leds_init(struct il_priv *il);
void il_leds_exit(struct il_priv *il);
1780
1781/**
1782 * struct il_cfg
1783 * @fw_name_pre: Firmware filename prefix. The api version and extension
1784 * (.ucode) will be added to filename before loading from disk. The
1785 * filename is constructed as fw_name_pre<api>.ucode.
1786 * @ucode_api_max: Highest version of uCode API supported by driver.
1787 * @ucode_api_min: Lowest version of uCode API supported by driver.
1788 * @scan_antennas: available antenna for scan operation
1789 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
1790 *
1791 * We enable the driver to be backward compatible wrt API version. The
1792 * driver specifies which APIs it supports (with @ucode_api_max being the
1793 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
1794 * it has a supported API version. The firmware's API version will be
1795 * stored in @il_priv, enabling the driver to make runtime changes based
1796 * on firmware version used.
1797 *
1798 * For example,
1799 * if (IL_UCODE_API(il->ucode_ver) >= 2) {
1800 * Driver interacts with Firmware API version >= 2.
1801 * } else {
1802 * Driver interacts with Firmware API version 1.
1803 * }
1804 *
1805 * The ideal usage of this infrastructure is to treat a new ucode API
1806 * release as a new hardware revision. That is, through utilizing the
1807 * il_hcmd_utils_ops etc. we accommodate different command structures
1808 * and flows between hardware versions as well as their API
1809 * versions.
1810 *
1811 */
/* Per-device configuration; see the kernel-doc block above for how
 * @ucode_api_max/@ucode_api_min gate firmware loading. */
struct il_cfg {
	/* params specific to an individual device within a device family */
	const char *name;		/* marketing/device name string */
	const char *fw_name_pre;	/* firmware filename prefix */
	const unsigned int ucode_api_max;	/* highest supported uCode API */
	const unsigned int ucode_api_min;	/* lowest supported uCode API */
	u8 valid_tx_ant;		/* bitmask of usable TX antennas */
	u8 valid_rx_ant;		/* bitmask of usable RX antennas */
	unsigned int sku;		/* SKU capability bits (bands, HT, ...) */
	u16 eeprom_ver;			/* expected EEPROM version */
	u16 eeprom_calib_ver;		/* expected EEPROM calibration version */
	const struct il_ops *ops;	/* device-specific operation table */
	/* module based parameters which can be set from modprobe cmd */
	const struct il_mod_params *mod_params;
	/* params not likely to change within a device family */
	struct il_base_params *base_params;
	/* params likely to change within a device family */
	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
	enum il_led_mode led_mode;
};
1832
1833/***************************
1834 * L i b *
1835 ***************************/
1836
1837struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg);
1838int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1839 u16 queue, const struct ieee80211_tx_queue_params *params);
1840int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
1841
1842void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
1843 int hw_decrypt);
1844int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx);
1845int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx);
1846int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
1847 struct il_rxon_context *ctx);
1848void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
1849 enum ieee80211_band band, struct ieee80211_vif *vif);
1850u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
1851void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
1852bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
1853 struct ieee80211_sta_ht_cap *ht_cap);
1854void il_connection_init_rx_config(struct il_priv *il,
1855 struct il_rxon_context *ctx);
1856void il_set_rate(struct il_priv *il);
1857int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
1858 u32 decrypt_res, struct ieee80211_rx_status *stats);
1859void il_irq_handle_error(struct il_priv *il);
1860int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
1861void il_mac_remove_interface(struct ieee80211_hw *hw,
1862 struct ieee80211_vif *vif);
1863int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1864 enum nl80211_iftype newtype, bool newp2p);
1865int il_alloc_txq_mem(struct il_priv *il);
1866void il_txq_mem(struct il_priv *il);
1867
1868#ifdef CONFIG_IWLEGACY_DEBUGFS
1869int il_alloc_traffic_mem(struct il_priv *il);
1870void il_free_traffic_mem(struct il_priv *il);
1871void il_reset_traffic_log(struct il_priv *il);
1872void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
1873 struct ieee80211_hdr *header);
1874void il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
1875 struct ieee80211_hdr *header);
1876const char *il_get_mgmt_string(int cmd);
1877const char *il_get_ctrl_string(int cmd);
1878void il_clear_traffic_stats(struct il_priv *il);
1879void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
1880#else
1881static inline int
1882il_alloc_traffic_mem(struct il_priv *il)
1883{
1884 return 0;
1885}
1886
1887static inline void
1888il_free_traffic_mem(struct il_priv *il)
1889{
1890}
1891
1892static inline void
1893il_reset_traffic_log(struct il_priv *il)
1894{
1895}
1896
1897static inline void
1898il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
1899 struct ieee80211_hdr *header)
1900{
1901}
1902
1903static inline void
1904il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
1905 struct ieee80211_hdr *header)
1906{
1907}
1908
1909static inline void
1910il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
1911{
1912}
1913#endif
1914/*****************************************************
1915 * RX handlers.
1916 * **************************************************/
1917void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
1918void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
1919void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
1920
1921/*****************************************************
1922* RX
1923******************************************************/
1924void il_cmd_queue_unmap(struct il_priv *il);
1925void il_cmd_queue_free(struct il_priv *il);
1926int il_rx_queue_alloc(struct il_priv *il);
1927void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
1928int il_rx_queue_space(const struct il_rx_queue *q);
1929void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
1930/* Handlers */
1931void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
1932void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
1933void il_chswitch_done(struct il_priv *il, bool is_success);
1934void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
1935
1936/* TX helpers */
1937
1938/*****************************************************
1939* TX
1940******************************************************/
1941void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
1942int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
1943 u32 txq_id);
1944void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
1945 int slots_num, u32 txq_id);
1946void il_tx_queue_unmap(struct il_priv *il, int txq_id);
1947void il_tx_queue_free(struct il_priv *il, int txq_id);
1948void il_setup_watchdog(struct il_priv *il);
1949/*****************************************************
1950 * TX power
1951 ****************************************************/
1952int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
1953
1954/*******************************************************************************
1955 * Rate
1956 ******************************************************************************/
1957
1958u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx);
1959
1960/*******************************************************************************
1961 * Scanning
1962 ******************************************************************************/
1963void il_init_scan_params(struct il_priv *il);
1964int il_scan_cancel(struct il_priv *il);
1965int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
1966void il_force_scan_end(struct il_priv *il);
1967int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1968 struct cfg80211_scan_request *req);
1969void il_internal_short_hw_scan(struct il_priv *il);
1970int il_force_reset(struct il_priv *il, bool external);
1971u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
1972 const u8 *ta, const u8 *ie, int ie_len, int left);
1973void il_setup_rx_scan_handlers(struct il_priv *il);
1974u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1975 u8 n_probes);
1976u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
1977 struct ieee80211_vif *vif);
1978void il_setup_scan_deferred_work(struct il_priv *il);
1979void il_cancel_scan_deferred_work(struct il_priv *il);
1980
1981/* For faster active scanning, scan will move to the next channel if fewer than
1982 * PLCP_QUIET_THRESH packets are heard on this channel within
1983 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
1984 * time if it's a quiet channel (nothing responded to our probe, and there's
1985 * no other traffic).
1986 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
1987#define IL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
1988#define IL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
1989
1990#define IL_SCAN_CHECK_WATCHDOG (HZ * 7)
1991
1992/*****************************************************
1993 * S e n d i n g H o s t C o m m a n d s *
1994 *****************************************************/
1995
1996const char *il_get_cmd_string(u8 cmd);
1997int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd);
1998int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd);
1999int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len,
2000 const void *data);
2001int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
2002 void (*callback) (struct il_priv *il,
2003 struct il_device_cmd *cmd,
2004 struct il_rx_pkt *pkt));
2005
2006int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
2007
2008/*****************************************************
2009 * PCI *
2010 *****************************************************/
2011
2012static inline u16
2013il_pcie_link_ctl(struct il_priv *il)
2014{
2015 int pos;
2016 u16 pci_lnk_ctl;
2017 pos = pci_pcie_cap(il->pci_dev);
2018 pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
2019 return pci_lnk_ctl;
2020}
2021
2022void il_bg_watchdog(unsigned long data);
2023u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
2024__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
2025 u32 beacon_interval);
2026
2027#ifdef CONFIG_PM
2028int il_pci_suspend(struct device *device);
2029int il_pci_resume(struct device *device);
2030extern const struct dev_pm_ops il_pm_ops;
2031
2032#define IL_LEGACY_PM_OPS (&il_pm_ops)
2033
2034#else /* !CONFIG_PM */
2035
2036#define IL_LEGACY_PM_OPS NULL
2037
2038#endif /* !CONFIG_PM */
2039
2040/*****************************************************
2041* Error Handling Debugging
2042******************************************************/
2043void il4965_dump_nic_error_log(struct il_priv *il);
2044#ifdef CONFIG_IWLEGACY_DEBUG
2045void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx);
2046#else
2047static inline void
2048il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
2049{
2050}
2051#endif
2052
2053void il_clear_isr_stats(struct il_priv *il);
2054
2055/*****************************************************
2056* GEOS
2057******************************************************/
2058int il_init_geos(struct il_priv *il);
2059void il_free_geos(struct il_priv *il);
2060
2061/*************** DRIVER STATUS FUNCTIONS *****/
2062
2063#define S_HCMD_ACTIVE 0 /* host command in progress */
2064/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
2065#define S_INT_ENABLED 2
2066#define S_RF_KILL_HW 3
2067#define S_CT_KILL 4
2068#define S_INIT 5
2069#define S_ALIVE 6
2070#define S_READY 7
2071#define S_TEMPERATURE 8
2072#define S_GEO_CONFIGURED 9
2073#define S_EXIT_PENDING 10
2074#define S_STATS 12
2075#define S_SCANNING 13
2076#define S_SCAN_ABORTING 14
2077#define S_SCAN_HW 15
2078#define S_POWER_PMI 16
2079#define S_FW_ERROR 17
2080#define S_CHANNEL_SWITCH_PENDING 18
2081
2082static inline int
2083il_is_ready(struct il_priv *il)
2084{
2085 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
2086 * set but EXIT_PENDING is not */
2087 return test_bit(S_READY, &il->status) &&
2088 test_bit(S_GEO_CONFIGURED, &il->status) &&
2089 !test_bit(S_EXIT_PENDING, &il->status);
2090}
2091
/* Non-zero once the uCode has reported ALIVE. */
static inline int
il_is_alive(struct il_priv *il)
{
	return test_bit(S_ALIVE, &il->status);
}
2097
/* Non-zero while the device is running the *init* uCode image. */
static inline int
il_is_init(struct il_priv *il)
{
	return test_bit(S_INIT, &il->status);
}
2103
/* Non-zero when the hardware RF-kill switch is asserted. */
static inline int
il_is_rfkill_hw(struct il_priv *il)
{
	return test_bit(S_RF_KILL_HW, &il->status);
}
2109
/* RF-kill state; currently only the HW switch exists, so this is an alias. */
static inline int
il_is_rfkill(struct il_priv *il)
{
	return il_is_rfkill_hw(il);
}
2115
/* Non-zero when thermal (CT) kill has shut the radio down. */
static inline int
il_is_ctkill(struct il_priv *il)
{
	return test_bit(S_CT_KILL, &il->status);
}
2121
/* Ready check that additionally requires the radio not to be RF-killed. */
static inline int
il_is_ready_rf(struct il_priv *il)
{
	return il_is_rfkill(il) ? 0 : il_is_ready(il);
}
2131
2132extern void il_send_bt_config(struct il_priv *il);
2133extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
2134void il_apm_stop(struct il_priv *il);
2135int il_apm_init(struct il_priv *il);
2136
2137int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx);
/* Dispatch the device-specific RXON_ASSOC command via the ops table. */
static inline int
il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
{
	return il->cfg->ops->hcmd->rxon_assoc(il, ctx);
}
2143
/* Dispatch the device-specific commit_rxon handler via the ops table. */
static inline int
il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
{
	return il->cfg->ops->hcmd->commit_rxon(il, ctx);
}
2149
/* Return mac80211's supported-band descriptor for @band (may be NULL
 * if the band is not registered). */
static inline const struct ieee80211_supported_band *
il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
{
	return il->hw->wiphy->bands[band];
}
2155
2156/* mac80211 handlers */
2157int il_mac_config(struct ieee80211_hw *hw, u32 changed);
2158void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
2159void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2160 struct ieee80211_bss_conf *bss_conf, u32 changes);
2161void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
2162 __le16 fc, __le32 *tx_flags);
2163
2164irqreturn_t il_isr(int irq, void *data);
2165
2166#include <linux/io.h>
2167
/* Raw 8-bit MMIO write at byte offset @ofs; no locking, no NIC wakeup. */
static inline void
_il_write8(struct il_priv *il, u32 ofs, u8 val)
{
	iowrite8(val, il->hw_base + ofs);
}
2173#define il_write8(il, ofs, val) _il_write8(il, ofs, val)
2174
/* Raw 32-bit MMIO write at byte offset @ofs; no locking, no NIC wakeup. */
static inline void
_il_wr(struct il_priv *il, u32 ofs, u32 val)
{
	iowrite32(val, il->hw_base + ofs);
}
2180
/* Raw 32-bit MMIO read at byte offset @ofs; no locking, no NIC wakeup. */
static inline u32
_il_rd(struct il_priv *il, u32 ofs)
{
	return ioread32(il->hw_base + ofs);
}
2186
2187#define IL_POLL_INTERVAL 10 /* microseconds */
2188static inline int
2189_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
2190{
2191 int t = 0;
2192
2193 do {
2194 if ((_il_rd(il, addr) & mask) == (bits & mask))
2195 return t;
2196 udelay(IL_POLL_INTERVAL);
2197 t += IL_POLL_INTERVAL;
2198 } while (t < timeout);
2199
2200 return -ETIMEDOUT;
2201}
2202
/* Read-modify-write OR of @mask into @reg; caller must hold reg_lock. */
static inline void
_il_set_bit(struct il_priv *il, u32 reg, u32 mask)
{
	_il_wr(il, reg, _il_rd(il, reg) | mask);
}
2208
2209static inline void
2210il_set_bit(struct il_priv *p, u32 r, u32 m)
2211{
2212 unsigned long reg_flags;
2213
2214 spin_lock_irqsave(&p->reg_lock, reg_flags);
2215 _il_set_bit(p, r, m);
2216 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
2217}
2218
/* Read-modify-write AND-NOT of @mask in @reg; caller must hold reg_lock. */
static inline void
_il_clear_bit(struct il_priv *il, u32 reg, u32 mask)
{
	_il_wr(il, reg, _il_rd(il, reg) & ~mask);
}
2224
2225static inline void
2226il_clear_bit(struct il_priv *p, u32 r, u32 m)
2227{
2228 unsigned long reg_flags;
2229
2230 spin_lock_irqsave(&p->reg_lock, reg_flags);
2231 _il_clear_bit(p, r, m);
2232 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
2233}
2234
/*
 * _il_grab_nic_access - wake the NIC's MAC and hold it awake for register I/O
 *
 * Caller must hold reg_lock.  Returns 0 on success; on timeout logs the
 * CSR_GP_CNTRL value, forces an NMI to the device, and returns -EIO.
 * Pair every successful call with _il_release_nic_access().
 */
static inline int
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (ret < 0) {
		val = _il_rd(il, CSR_GP_CNTRL);
		IL_ERR("MAC is in deep sleep!.  CSR_GP_CNTRL = 0x%08X\n", val);
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return -EIO;
	}

	return 0;
}
2274
/* Drop the MAC access request taken by _il_grab_nic_access(); the NIC may
 * go back to sleep afterwards.  Caller must hold reg_lock. */
static inline void
_il_release_nic_access(struct il_priv *il)
{
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
2280
/*
 * il_rd - locked register read with NIC wakeup.
 *
 * NOTE(review): the _il_grab_nic_access() return value is ignored here,
 * unlike il_wr() which skips the access on failure -- presumably because a
 * read must return something either way; confirm before "fixing".
 */
static inline u32
il_rd(struct il_priv *il, u32 reg)
{
	u32 value;
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	value = _il_rd(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;

}
2295
2296static inline void
2297il_wr(struct il_priv *il, u32 reg, u32 value)
2298{
2299 unsigned long reg_flags;
2300
2301 spin_lock_irqsave(&il->reg_lock, reg_flags);
2302 if (!_il_grab_nic_access(il)) {
2303 _il_wr(il, reg, value);
2304 _il_release_nic_access(il);
2305 }
2306 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2307}
2308
2309static inline void
2310il_write_reg_buf(struct il_priv *il, u32 reg, u32 len, u32 * values)
2311{
2312 u32 count = sizeof(u32);
2313
2314 if (il != NULL && values != NULL) {
2315 for (; 0 < len; len -= count, reg += count, values++)
2316 il_wr(il, reg, *values);
2317 }
2318}
2319
2320static inline int
2321il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
2322{
2323 int t = 0;
2324
2325 do {
2326 if ((il_rd(il, addr) & mask) == mask)
2327 return t;
2328 udelay(IL_POLL_INTERVAL);
2329 t += IL_POLL_INTERVAL;
2330 } while (t < timeout);
2331
2332 return -ETIMEDOUT;
2333}
2334
/* Raw periphery-register read: latch @reg into the indirect-address
 * register, then read the data register.  The (3 << 24) control bits
 * presumably enable the access -- see the HBUS register documentation.
 * Caller must hold reg_lock and NIC access. */
static inline u32
_il_rd_prph(struct il_priv *il, u32 reg)
{
	_il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
	rmb();
	return _il_rd(il, HBUS_TARG_PRPH_RDAT);
}
2342
/*
 * il_rd_prph - locked periphery-register read with NIC wakeup.
 *
 * NOTE(review): grab_nic_access return value ignored, same pattern as
 * il_rd() -- a read has to return something either way.
 */
static inline u32
il_rd_prph(struct il_priv *il, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	val = _il_rd_prph(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return val;
}
2356
/* Raw periphery-register write via the indirect address/data pair.
 * Only the low 16 bits of @addr are used.  Caller must hold reg_lock
 * and NIC access. */
static inline void
_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
{
	_il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
	wmb();
	_il_wr(il, HBUS_TARG_PRPH_WDAT, val);
}
2364
2365static inline void
2366il_wr_prph(struct il_priv *il, u32 addr, u32 val)
2367{
2368 unsigned long reg_flags;
2369
2370 spin_lock_irqsave(&il->reg_lock, reg_flags);
2371 if (!_il_grab_nic_access(il)) {
2372 _il_wr_prph(il, addr, val);
2373 _il_release_nic_access(il);
2374 }
2375 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2376}
2377
2378#define _il_set_bits_prph(il, reg, mask) \
2379_il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask))
2380
/*
 * il_set_bits_prph - locked OR of @mask into periphery register @reg.
 *
 * NOTE(review): grab_nic_access return ignored here, unlike il_wr_prph();
 * confirm intent before making these consistent.
 */
static inline void
il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	_il_set_bits_prph(il, reg, mask);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
2392
2393#define _il_set_bits_mask_prph(il, reg, bits, mask) \
2394_il_wr_prph(il, reg, \
2395 ((_il_rd_prph(il, reg) & mask) | bits))
2396
/* Locked masked update of periphery register @reg:
 * new value = (old & mask) | bits.  Grab return ignored (see il_set_bits_prph). */
static inline void
il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	_il_set_bits_mask_prph(il, reg, bits, mask);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
2408
2409static inline void
2410il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
2411{
2412 unsigned long reg_flags;
2413 u32 val;
2414
2415 spin_lock_irqsave(&il->reg_lock, reg_flags);
2416 _il_grab_nic_access(il);
2417 val = _il_rd_prph(il, reg);
2418 _il_wr_prph(il, reg, (val & ~mask));
2419 _il_release_nic_access(il);
2420 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2421}
2422
/* Locked read of one word of device (target) memory at @addr via the
 * indirect HBUS address/data registers.  Grab return ignored (read must
 * return something either way). */
static inline u32
il_read_targ_mem(struct il_priv *il, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);

	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
	rmb();
	value = _il_rd(il, HBUS_TARG_MEM_RDAT);

	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;
}
2440
2441static inline void
2442il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
2443{
2444 unsigned long reg_flags;
2445
2446 spin_lock_irqsave(&il->reg_lock, reg_flags);
2447 if (!_il_grab_nic_access(il)) {
2448 _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
2449 wmb();
2450 _il_wr(il, HBUS_TARG_MEM_WDAT, val);
2451 _il_release_nic_access(il);
2452 }
2453 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2454}
2455
/* Locked bulk write of @len bytes from @values into device memory starting
 * at @addr.  The target address auto-increments after each WDAT write, so
 * only the first address is programmed.  @len is expected to be a multiple
 * of sizeof(u32). */
static inline void
il_write_targ_mem_buf(struct il_priv *il, u32 addr, u32 len, u32 * values)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (!_il_grab_nic_access(il)) {
		_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
		wmb();
		for (; 0 < len; len -= sizeof(u32), values++)
			_il_wr(il, HBUS_TARG_MEM_WDAT, *values);

		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
2472
2473#define HW_KEY_DYNAMIC 0
2474#define HW_KEY_DEFAULT 1
2475
2476#define IL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
2477#define IL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
2478#define IL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
2479 being activated */
2480#define IL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
2481 (this is for the IBSS BSSID stations) */
2482#define IL_STA_BCAST BIT(4) /* this station is the special bcast station */
2483
2484void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx);
2485void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx);
2486void il_dealloc_bcast_stations(struct il_priv *il);
2487int il_get_free_ucode_key_idx(struct il_priv *il);
2488int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
2489int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
2490 const u8 *addr, bool is_ap,
2491 struct ieee80211_sta *sta, u8 *sta_id_r);
2492int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
2493int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2494 struct ieee80211_sta *sta);
2495
2496u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
2497 const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
2498
2499int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
2500 struct il_link_quality_cmd *lq, u8 flags, bool init);
2501
2502/**
2503 * il_clear_driver_stations - clear knowledge of all stations from driver
2504 * @il: iwl il struct
2505 *
2506 * This is called during il_down() to make sure that in the case
2507 * we're coming there from a hardware restart mac80211 will be
2508 * able to reconfigure stations -- if we're getting there in the
2509 * normal down flow then the stations will already be cleared.
2510 */
static inline void
il_clear_driver_stations(struct il_priv *il)
{
	unsigned long flags;
	struct il_rxon_context *ctx = &il->ctx;

	/* Wipe the driver-side station table under sta_lock. */
	spin_lock_irqsave(&il->sta_lock, flags);
	memset(il->stations, 0, sizeof(il->stations));
	il->num_stations = 0;

	/* Forget all hardware key-table slot allocations as well. */
	il->ucode_key_table = 0;

	/*
	 * Remove all key information that is not stored as part
	 * of station information since mac80211 may not have had
	 * a chance to remove all the keys. When device is
	 * reconfigured by mac80211 after an error all keys will
	 * be reconfigured.
	 */
	memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
	ctx->key_mapping_keys = 0;

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2535
2536static inline int
2537il_sta_id(struct ieee80211_sta *sta)
2538{
2539 if (WARN_ON(!sta))
2540 return IL_INVALID_STATION;
2541
2542 return ((struct il_station_priv_common *)sta->drv_priv)->sta_id;
2543}
2544
2545/**
2546 * il_sta_id_or_broadcast - return sta_id or broadcast sta
2547 * @il: iwl il
2548 * @context: the current context
2549 * @sta: mac80211 station
2550 *
2551 * In certain circumstances mac80211 passes a station pointer
2552 * that may be %NULL, for example during TX or key setup. In
2553 * that case, we need to use the broadcast station, so this
2554 * inline wraps that pattern.
2555 */
static inline int
il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context,
		       struct ieee80211_sta *sta)
{
	int sta_id;

	/* NULL station means "use the context's broadcast station". */
	if (!sta)
		return context->bcast_sta_id;

	sta_id = il_sta_id(sta);

	/*
	 * mac80211 should not be passing a partially
	 * initialised station!
	 */
	WARN_ON(sta_id == IL_INVALID_STATION);

	return sta_id;
}
2575
2576/**
2577 * il_queue_inc_wrap - increment queue idx, wrap back to beginning
2578 * @idx -- current idx
2579 * @n_bd -- total number of entries in queue (must be power of 2)
2580 */
/*
 * il_queue_inc_wrap - increment queue idx, wrap back to beginning
 * @idx: current idx
 * @n_bd: total number of entries in queue (must be power of 2)
 */
static inline int
il_queue_inc_wrap(int idx, int n_bd)
{
	return (idx + 1) & (n_bd - 1);
}
2586
2587/**
2588 * il_queue_dec_wrap - decrement queue idx, wrap back to end
2589 * @idx -- current idx
2590 * @n_bd -- total number of entries in queue (must be power of 2)
2591 */
/*
 * il_queue_dec_wrap - decrement queue idx, wrap back to end
 * @idx: current idx
 * @n_bd: total number of entries in queue (must be power of 2)
 */
static inline int
il_queue_dec_wrap(int idx, int n_bd)
{
	return (idx - 1) & (n_bd - 1);
}
2597
2598/* TODO: Move fw_desc functions to iwl-pci.ko */
/* Release the DMA-coherent firmware image buffer described by @desc and
 * reset the descriptor so a double free becomes a no-op. */
static inline void
il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
{
	if (desc->v_addr)
		dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr,
				  desc->p_addr);
	desc->v_addr = NULL;
	desc->len = 0;
}
2608
/* Allocate a DMA-coherent buffer of desc->len bytes for a firmware image.
 * Returns 0 on success, -EINVAL for a zero-length request, -ENOMEM on
 * allocation failure. */
static inline int
il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
{
	if (!desc->len) {
		desc->v_addr = NULL;
		return -EINVAL;
	}

	desc->v_addr =
	    dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr,
			       GFP_KERNEL);
	return (desc->v_addr != NULL) ? 0 : -ENOMEM;
}
2622
2623/*
2624 * we have 8 bits used like this:
2625 *
2626 * 7 6 5 4 3 2 1 0
2627 * | | | | | | | |
2628 * | | | | | | +-+-------- AC queue (0-3)
2629 * | | | | | |
2630 * | +-+-+-+-+------------ HW queue ID
2631 * |
2632 * +---------------------- unused
2633 */
/* Pack the software queue id: bits 1:0 = AC (0-3), bits 6:2 = HW queue id.
 * See the bit-layout diagram above. */
static inline void
il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
{
	BUG_ON(ac > 3);		/* only have 2 bits */
	BUG_ON(hwq > 31);	/* only use 5 bits */

	txq->swq_id = (hwq << 2) | ac;
}
2642
2643static inline void
2644il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
2645{
2646 u8 queue = txq->swq_id;
2647 u8 ac = queue & 3;
2648 u8 hwq = (queue >> 2) & 0x1f;
2649
2650 if (test_and_clear_bit(hwq, il->queue_stopped))
2651 if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
2652 ieee80211_wake_queue(il->hw, ac);
2653}
2654
2655static inline void
2656il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
2657{
2658 u8 queue = txq->swq_id;
2659 u8 ac = queue & 3;
2660 u8 hwq = (queue >> 2) & 0x1f;
2661
2662 if (!test_and_set_bit(hwq, il->queue_stopped))
2663 if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
2664 ieee80211_stop_queue(il->hw, ac);
2665}
2666
2667#ifdef ieee80211_stop_queue
2668#undef ieee80211_stop_queue
2669#endif
2670
2671#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
2672
2673#ifdef ieee80211_wake_queue
2674#undef ieee80211_wake_queue
2675#endif
2676
2677#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
2678
/* Mask all device interrupts and ack anything already pending, so no ISR
 * work arrives after this returns. */
static inline void
il_disable_interrupts(struct il_priv *il)
{
	clear_bit(S_INT_ENABLED, &il->status);

	/* disable interrupts from uCode/NIC to host */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	_il_wr(il, CSR_INT, 0xffffffff);
	_il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
}
2692
/* Enable only the RF-kill interrupt (used while the radio is down so we
 * still notice the switch changing). */
static inline void
il_enable_rfkill_int(struct il_priv *il)
{
	_il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}
2698
/* Re-enable the driver's normal interrupt set (il->inta_mask). */
static inline void
il_enable_interrupts(struct il_priv *il)
{
	set_bit(S_INT_ENABLED, &il->status);
	_il_wr(il, CSR_INT_MASK, il->inta_mask);
}
2705
2706/**
2707 * il_beacon_time_mask_low - mask of lower 32 bit of beacon time
2708 * @il -- pointer to il_priv data structure
2709 * @tsf_bits -- number of bits need to shift for masking)
2710 */
2711static inline u32
2712il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits)
2713{
2714 return (1 << tsf_bits) - 1;
2715}
2716
2717/**
2718 * il_beacon_time_mask_high - mask of higher 32 bit of beacon time
2719 * @il -- pointer to il_priv data structure
2720 * @tsf_bits -- number of bits need to shift for masking)
2721 */
2722static inline u32
2723il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits)
2724{
2725 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
2726}
2727
2728/**
2729 * struct il_rb_status - reseve buffer status host memory mapped FH registers
2730 *
2731 * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed
2732 * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed
2733 * @finished_rb_num [0:11] - Indicates the idx of the current RB
2734 * in which the last frame was written to
2735 * @finished_fr_num [0:11] - Indicates the idx of the RX Frame
2736 * which was transferred
2737 */
struct il_rb_status {
	__le16 closed_rb_num;		/* [0:11] idx of last closed RB */
	__le16 closed_fr_num;		/* [0:11] idx of last closed RX frame */
	__le16 finished_rb_num;		/* [0:11] RB the last frame was written to */
	/* NOTE(review): "nam" looks like a typo for "num", but renaming the
	 * field would break any code referencing it -- leave as-is. */
	__le16 finished_fr_nam;		/* [0:11] idx of last transferred RX frame */
	__le32 __unused;	/* 3945 only */
} __packed;
2745
2746#define TFD_QUEUE_SIZE_MAX (256)
2747#define TFD_QUEUE_SIZE_BC_DUP (64)
2748#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
2749#define IL_TX_DMA_MASK DMA_BIT_MASK(36)
2750#define IL_NUM_OF_TBS 20
2751
/* Extract bits 35:32 of a DMA address (devices use 36-bit addressing,
 * see IL_TX_DMA_MASK).  Returns 0 when dma_addr_t is only 32 bits wide;
 * the double 16-bit shift avoids a >32-bit shift on 32-bit dma_addr_t. */
static inline u8
il_get_dma_hi_addr(dma_addr_t addr)
{
	return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
}
2757
2758/**
2759 * struct il_tfd_tb transmit buffer descriptor within transmit frame descriptor
2760 *
2761 * This structure contains dma address and length of transmission address
2762 *
2763 * @lo: low [31:0] portion of the dma address of TX buffer every even is
2764 * unaligned on 16 bit boundary
2765 * @hi_n_len: 0-3 [35:32] portion of dma
2766 * 4-15 length of the tx buffer
2767 */
struct il_tfd_tb {
	__le32 lo;		/* low 32 bits of the buffer DMA address */
	__le16 hi_n_len;	/* [3:0] addr bits 35:32, [15:4] buffer length */
} __packed;
2772
2773/**
2774 * struct il_tfd
2775 *
2776 * Transmit Frame Descriptor (TFD)
2777 *
2778 * @ __reserved1[3] reserved
2779 * @ num_tbs 0-4 number of active tbs
2780 * 5 reserved
2781 * 6-7 padding (not used)
2782 * @ tbs[20] transmit frame buffer descriptors
2783 * @ __pad padding
2784 *
2785 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
2786 * Both driver and device share these circular buffers, each of which must be
2787 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
2788 *
2789 * Driver must indicate the physical address of the base of each
2790 * circular buffer via the FH49_MEM_CBBC_QUEUE registers.
2791 *
2792 * Each TFD contains pointer/size information for up to 20 data buffers
2793 * in host DRAM. These buffers collectively contain the (one) frame described
2794 * by the TFD. Each buffer must be a single contiguous block of memory within
2795 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
2796 * of (4K - 4). The concatenates all of a TFD's buffers into a single
2797 * Tx frame, up to 8 KBytes in size.
2798 *
2799 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
2800 */
2801struct il_tfd {
2802 u8 __reserved1[3];
2803 u8 num_tbs;
2804 struct il_tfd_tb tbs[IL_NUM_OF_TBS];
2805 __le32 __pad;
2806} __packed;
2807/* PCI registers */
2808#define PCI_CFG_RETRY_TIMEOUT 0x041
2809
2810/* PCI register values */
2811#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
2812#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
2813
/* One entry of the 4965 rate table: the various on-air encodings of a
 * bit rate plus prev/next links used by the rate-scaling algorithm. */
struct il_rate_info {
	u8 plcp;		/* uCode API:  RATE_6M_PLCP, etc. */
	u8 plcp_siso;		/* uCode API:  RATE_SISO_6M_PLCP, etc. */
	u8 plcp_mimo2;		/* uCode API:  RATE_MIMO2_6M_PLCP, etc. */
	u8 ieee;		/* MAC header:  RATE_6M_IEEE, etc. */
	u8 prev_ieee;		/* previous rate in IEEE speeds */
	u8 next_ieee;		/* next rate in IEEE speeds */
	u8 prev_rs;		/* previous rate used in rs algo */
	u8 next_rs;		/* next rate used in rs algo */
	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
};

/* 3945 variant: no HT (SISO/MIMO2) encodings, adds rate-table cmd idxes */
struct il3945_rate_info {
	u8 plcp;		/* uCode API:  RATE_6M_PLCP, etc. */
	u8 ieee;		/* MAC header:  RATE_6M_IEEE, etc. */
	u8 prev_ieee;		/* previous rate in IEEE speeds */
	u8 next_ieee;		/* next rate in IEEE speeds */
	u8 prev_rs;		/* previous rate used in rs algo */
	u8 next_rs;		/* next rate used in rs algo */
	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
	u8 table_rs_idx;	/* idx in rate scale table cmd */
	u8 prev_table_rs;	/* prev in rate table cmd */
};

/*
 * These serve as idxes into
 * struct il_rate_info il_rates[RATE_COUNT];
 * CCK rates (1-11M) first, then OFDM (6-60M).
 */
enum {
	RATE_1M_IDX = 0,
	RATE_2M_IDX,
	RATE_5M_IDX,
	RATE_11M_IDX,
	RATE_6M_IDX,
	RATE_9M_IDX,
	RATE_12M_IDX,
	RATE_18M_IDX,
	RATE_24M_IDX,
	RATE_36M_IDX,
	RATE_48M_IDX,
	RATE_54M_IDX,
	RATE_60M_IDX,
	RATE_COUNT,
	RATE_COUNT_LEGACY = RATE_COUNT - 1,	/* Excluding 60M */
	RATE_COUNT_3945 = RATE_COUNT - 1,
	RATE_INVM_IDX = RATE_COUNT,
	RATE_INVALID = RATE_COUNT,
};
2864
/* Idxes as used in the rate-scale table command: OFDM rates first */
enum {
	RATE_6M_IDX_TBL = 0,
	RATE_9M_IDX_TBL,
	RATE_12M_IDX_TBL,
	RATE_18M_IDX_TBL,
	RATE_24M_IDX_TBL,
	RATE_36M_IDX_TBL,
	RATE_48M_IDX_TBL,
	RATE_54M_IDX_TBL,
	RATE_1M_IDX_TBL,
	RATE_2M_IDX_TBL,
	RATE_5M_IDX_TBL,
	RATE_11M_IDX_TBL,
	RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1,
};

/* First/last rates of the OFDM and CCK ranges in il_rates[] */
enum {
	IL_FIRST_OFDM_RATE = RATE_6M_IDX,
	IL39_LAST_OFDM_RATE = RATE_54M_IDX,	/* 3945 has no 60M rate */
	IL_LAST_OFDM_RATE = RATE_60M_IDX,
	IL_FIRST_CCK_RATE = RATE_1M_IDX,
	IL_LAST_CCK_RATE = RATE_11M_IDX,
};

/* #define vs. enum to keep from defaulting to 'large integer' */
#define RATE_6M_MASK (1 << RATE_6M_IDX)
#define RATE_9M_MASK (1 << RATE_9M_IDX)
#define RATE_12M_MASK (1 << RATE_12M_IDX)
#define RATE_18M_MASK (1 << RATE_18M_IDX)
#define RATE_24M_MASK (1 << RATE_24M_IDX)
#define RATE_36M_MASK (1 << RATE_36M_IDX)
#define RATE_48M_MASK (1 << RATE_48M_IDX)
#define RATE_54M_MASK (1 << RATE_54M_IDX)
#define RATE_60M_MASK (1 << RATE_60M_IDX)
#define RATE_1M_MASK (1 << RATE_1M_IDX)
#define RATE_2M_MASK (1 << RATE_2M_IDX)
#define RATE_5M_MASK (1 << RATE_5M_IDX)
#define RATE_11M_MASK (1 << RATE_11M_IDX)
2903
/* uCode API values for legacy bit rates, both OFDM and CCK */
enum {
	RATE_6M_PLCP = 13,
	RATE_9M_PLCP = 15,
	RATE_12M_PLCP = 5,
	RATE_18M_PLCP = 7,
	RATE_24M_PLCP = 9,
	RATE_36M_PLCP = 11,
	RATE_48M_PLCP = 1,
	RATE_54M_PLCP = 3,
	RATE_60M_PLCP = 3,	/*FIXME:RS:should be removed */
	/* CCK rates encode the rate in units of 0.1 Mbps */
	RATE_1M_PLCP = 10,
	RATE_2M_PLCP = 20,
	RATE_5M_PLCP = 55,
	RATE_11M_PLCP = 110,
	/*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */
};

/* uCode API values for OFDM high-throughput (HT) bit rates */
enum {
	RATE_SISO_6M_PLCP = 0,
	RATE_SISO_12M_PLCP = 1,
	RATE_SISO_18M_PLCP = 2,
	RATE_SISO_24M_PLCP = 3,
	RATE_SISO_36M_PLCP = 4,
	RATE_SISO_48M_PLCP = 5,
	RATE_SISO_54M_PLCP = 6,
	RATE_SISO_60M_PLCP = 7,
	RATE_MIMO2_6M_PLCP = 0x8,
	RATE_MIMO2_12M_PLCP = 0x9,
	RATE_MIMO2_18M_PLCP = 0xa,
	RATE_MIMO2_24M_PLCP = 0xb,
	RATE_MIMO2_36M_PLCP = 0xc,
	RATE_MIMO2_48M_PLCP = 0xd,
	RATE_MIMO2_54M_PLCP = 0xe,
	RATE_MIMO2_60M_PLCP = 0xf,
	RATE_SISO_INVM_PLCP,
	RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP,
};

/* MAC header values for bit rates (rate in units of 0.5 Mbps) */
enum {
	RATE_6M_IEEE = 12,
	RATE_9M_IEEE = 18,
	RATE_12M_IEEE = 24,
	RATE_18M_IEEE = 36,
	RATE_24M_IEEE = 48,
	RATE_36M_IEEE = 72,
	RATE_48M_IEEE = 96,
	RATE_54M_IEEE = 108,
	RATE_60M_IEEE = 120,
	RATE_1M_IEEE = 2,
	RATE_2M_IEEE = 4,
	RATE_5M_IEEE = 11,
	RATE_11M_IEEE = 22,
};
2960
/* Composite masks built from the per-rate RATE_*_MASK bits above */
#define IL_CCK_BASIC_RATES_MASK \
	(RATE_1M_MASK | \
	 RATE_2M_MASK)

#define IL_CCK_RATES_MASK \
	(IL_CCK_BASIC_RATES_MASK | \
	 RATE_5M_MASK | \
	 RATE_11M_MASK)

#define IL_OFDM_BASIC_RATES_MASK \
	(RATE_6M_MASK | \
	 RATE_12M_MASK | \
	 RATE_24M_MASK)

#define IL_OFDM_RATES_MASK \
	(IL_OFDM_BASIC_RATES_MASK | \
	 RATE_9M_MASK | \
	 RATE_18M_MASK | \
	 RATE_36M_MASK | \
	 RATE_48M_MASK | \
	 RATE_54M_MASK)

#define IL_BASIC_RATES_MASK \
	(IL_OFDM_BASIC_RATES_MASK | \
	 IL_CCK_BASIC_RATES_MASK)

/* All valid rate bits; 3945 excludes the 60M entry */
#define RATES_MASK ((1 << RATE_COUNT) - 1)
#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1)

#define IL_INVALID_VALUE -1

/* RSSI bounds in dBm */
#define IL_MIN_RSSI_VAL -100
#define IL_MAX_RSSI_VAL 0
2994
/* These values specify how many Tx frame attempts before
 * searching for a new modulation mode */
#define IL_LEGACY_FAILURE_LIMIT	160
#define IL_LEGACY_SUCCESS_LIMIT	480
#define IL_LEGACY_TBL_COUNT 160

#define IL_NONE_LEGACY_FAILURE_LIMIT 400
#define IL_NONE_LEGACY_SUCCESS_LIMIT 4500
#define IL_NONE_LEGACY_TBL_COUNT 1500

/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
#define IL_RS_GOOD_RATIO 12800	/* 100% */
#define RATE_SCALE_SWITCH 10880	/*  85% */
#define RATE_HIGH_TH 10880	/*  85% */
#define RATE_INCREASE_TH 6400	/*  50% */
#define RATE_DECREASE_TH 1920	/*  15% */

/* possible actions when in legacy mode */
#define IL_LEGACY_SWITCH_ANTENNA1 0
#define IL_LEGACY_SWITCH_ANTENNA2 1
#define IL_LEGACY_SWITCH_SISO 2
#define IL_LEGACY_SWITCH_MIMO2_AB 3
#define IL_LEGACY_SWITCH_MIMO2_AC 4
#define IL_LEGACY_SWITCH_MIMO2_BC 5

/* possible actions when in siso mode */
#define IL_SISO_SWITCH_ANTENNA1 0
#define IL_SISO_SWITCH_ANTENNA2 1
#define IL_SISO_SWITCH_MIMO2_AB 2
#define IL_SISO_SWITCH_MIMO2_AC 3
#define IL_SISO_SWITCH_MIMO2_BC 4
#define IL_SISO_SWITCH_GI 5

/* possible actions when in mimo mode */
#define IL_MIMO2_SWITCH_ANTENNA1 0
#define IL_MIMO2_SWITCH_ANTENNA2 1
#define IL_MIMO2_SWITCH_SISO_A 2
#define IL_MIMO2_SWITCH_SISO_B 3
#define IL_MIMO2_SWITCH_SISO_C 4
#define IL_MIMO2_SWITCH_GI 5

#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI

#define IL_ACTION_LIMIT 3	/* # possible actions */

#define LQ_SIZE 2	/* 2 mode tables:  "Active" and "Search" */

/* load per tid defines for A-MPDU activation */
#define IL_AGG_TPT_THREHOLD 0	/* sic: misspelling of "THRESHOLD" kept
				 * to avoid breaking existing users */
#define IL_AGG_LOAD_THRESHOLD 10
#define IL_AGG_ALL_TID 0xff
#define TID_QUEUE_CELL_SPACING 50	/*mS */
#define TID_QUEUE_MAX_SIZE 20
#define TID_ROUND_VALUE 5	/* mS */
#define TID_MAX_LOAD_COUNT 8

#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
/* Elapsed time y - x, accounting for a counter that wrapped past x */
#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
3053
/* Global rate table, one entry per RATE_*_IDX */
extern const struct il_rate_info il_rates[RATE_COUNT];

/* Modulation mode of a link-quality table */
enum il_table_type {
	LQ_NONE,
	LQ_G,			/* legacy types */
	LQ_A,
	LQ_SISO,		/* high-throughput types */
	LQ_MIMO2,
	LQ_MAX,
};

/* Predicates over enum il_table_type */
#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
#define is_siso(tbl) ((tbl) == LQ_SISO)
#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
#define is_mimo(tbl) (is_mimo2(tbl))
#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
#define is_a_band(tbl) ((tbl) == LQ_A)
#define is_g_and(tbl) ((tbl) == LQ_G)

/* Antenna bitmasks; combine with | for multi-antenna configurations */
#define	ANT_NONE 0x0
#define	ANT_A BIT(0)
#define	ANT_B BIT(1)
#define	ANT_AB (ANT_A | ANT_B)
#define ANT_C BIT(2)
#define	ANT_AC (ANT_A | ANT_C)
#define ANT_BC (ANT_B | ANT_C)
#define ANT_ABC (ANT_AB | ANT_C)

#define IL_MAX_MCS_DISPLAY_SIZE 12
3083
/* Human-readable rate/MCS strings for debugfs display */
struct il_rate_mcs_info {
	char mbps[IL_MAX_MCS_DISPLAY_SIZE];
	char mcs[IL_MAX_MCS_DISPLAY_SIZE];
};

/**
 * struct il_rate_scale_data -- tx success history for one rate
 */
struct il_rate_scale_data {
	u64 data;		/* bitmap of successful frames */
	s32 success_counter;	/* number of frames successful */
	s32 success_ratio;	/* per-cent * 128  */
	s32 counter;		/* number of frames attempted */
	s32 average_tpt;	/* success ratio * expected throughput */
	unsigned long stamp;
};

/**
 * struct il_scale_tbl_info -- tx params and success history for all rates
 *
 * There are two of these in struct il_lq_sta,
 * one for "active", and one for "search".
 */
struct il_scale_tbl_info {
	enum il_table_type lq_type;
	u8 ant_type;
	u8 is_SGI;		/* 1 = short guard interval */
	u8 is_ht40;		/* 1 = 40 MHz channel width */
	u8 is_dup;		/* 1 = duplicated data streams */
	u8 action;		/* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */
	u8 max_search;		/* maximum number of tables we can search */
	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
	u32 current_rate;	/* rate_n_flags, uCode API format */
	struct il_rate_scale_data win[RATE_COUNT];	/* rate histories */
};

/* Sliding window of per-TID traffic, used to decide A-MPDU activation */
struct il_traffic_load {
	unsigned long time_stamp;	/* age of the oldest stats */
	u32 packet_count[TID_QUEUE_MAX_SIZE];	/* packet count in this time
						 * slice */
	u32 total;		/* total num of packets during the
				 * last TID_MAX_TIME_DIFF */
	u8 queue_count;		/* number of queues that has
				 * been used since the last cleanup */
	u8 head;		/* start of the circular buffer */
};
3130
/**
 * struct il_lq_sta -- driver's rate scaling private structure
 *
 * Pointer to this gets passed back and forth between driver and mac80211.
 */
struct il_lq_sta {
	u8 active_tbl;		/* idx of active table, range 0-1 */
	u8 enable_counter;	/* indicates HT mode */
	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
	u8 search_better_tbl;	/* 1: currently trying alternate mode */
	s32 last_tpt;

	/* The following determine when to search for a new mode */
	u32 table_count_limit;
	u32 max_failure_limit;	/* # failed frames before new search */
	u32 max_success_limit;	/* # successful frames before new search */
	u32 table_count;
	u32 total_failed;	/* total failed frames, any/all rates */
	u32 total_success;	/* total successful frames, any/all rates */
	u64 flush_timer;	/* time staying in mode before new search */

	u8 action_counter;	/* # mode-switch actions tried */
	u8 is_green;
	u8 is_dup;
	enum ieee80211_band band;

	/* The following are bitmaps of rates; RATE_6M_MASK, etc. */
	u32 supp_rates;
	u16 active_legacy_rate;
	u16 active_siso_rate;
	u16 active_mimo2_rate;
	s8 max_rate_idx;	/* Max rate set by user */
	u8 missed_rate_counter;

	struct il_link_quality_cmd lq;
	struct il_scale_tbl_info lq_info[LQ_SIZE];	/* "active", "search" */
	struct il_traffic_load load[TID_MAX_LOAD_COUNT];
	u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
	struct dentry *rs_sta_dbgfs_scale_table_file;
	struct dentry *rs_sta_dbgfs_stats_table_file;
	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
	u32 dbg_fixed_rate;
#endif
	struct il_priv *drv;

	/* used to be in sta_info */
	int last_txrate_idx;
	/* last tx rate_n_flags */
	u32 last_rate_n_flags;
	/* packets destined for this STA are aggregated */
	u8 is_agg;
};

/*
 * il_station_priv: Driver's private station information
 *
 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by driver. This structure is placed in that
 * space.
 *
 * The common struct MUST be first because it is shared between
 * 3945 and 4965!
 */
struct il_station_priv {
	struct il_station_priv_common common;
	struct il_lq_sta lq_sta;
	atomic_t pending_frames;
	bool client;
	bool asleep;
};
3203
3204static inline u8
3205il4965_num_of_ant(u8 m)
3206{
3207 return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C);
3208}
3209
3210static inline u8
3211il4965_first_antenna(u8 mask)
3212{
3213 if (mask & ANT_A)
3214 return ANT_A;
3215 if (mask & ANT_B)
3216 return ANT_B;
3217 return ANT_C;
3218}
3219
/**
 * il3945_rate_scale_init - Initialize the rate scale table based on assoc info
 *
 * The specific throughput table used is based on the type of network
 * associated with, including A, B, G, and G w/ TGG protection
 */
extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);

/* Initialize station's rate scaling information after adding station */
extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
				u8 sta_id);
extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
				u8 sta_id);

/**
 * il_rate_control_register - Register the rate control algorithm callbacks
 *
 * Since the rate control algorithm is hardware specific, there is no need
 * or reason to place it as a stand alone module.  The driver can call
 * il_rate_control_register in order to register the rate control callbacks
 * with the mac80211 subsystem.  This should be performed prior to calling
 * ieee80211_register_hw
 *
 */
extern int il4965_rate_control_register(void);
extern int il3945_rate_control_register(void);

/**
 * il_rate_control_unregister - Unregister the rate control callbacks
 *
 * This should be called after calling ieee80211_unregister_hw, but before
 * the driver is unloaded.
 */
extern void il4965_rate_control_unregister(void);
extern void il3945_rate_control_unregister(void);

/* Power management: apply/initialize the device power mode */
extern int il_power_update_mode(struct il_priv *il, bool force);
extern void il_power_initialize(struct il_priv *il);

/* Module-wide debug level, settable via module parameter */
extern u32 il_debug_level;
3260
#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il_get_debug_level: Return active debug level for device
 *
 * Using sysfs it is possible to set per device debug level. This debug
 * level will be used if set, otherwise the global debug level which can be
 * set via module parameter is used.
 */
static inline u32
il_get_debug_level(struct il_priv *il)
{
	if (il->debug_level)
		return il->debug_level;
	else
		return il_debug_level;
}
#else
/* Debugging compiled out: only the module-wide level exists */
static inline u32
il_get_debug_level(struct il_priv *il)
{
	return il_debug_level;
}
#endif
3284
/* Unconditional hex dump of @len bytes at @p at KERN_ERR level */
#define il_print_hex_error(il, p, len)					\
do {									\
	print_hex_dump(KERN_ERR, "iwl data: ",				\
		       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);		\
} while (0)

#ifdef CONFIG_IWLEGACY_DEBUG
/* Debug print, emitted only when @level is enabled in the active debug
 * level; prefixed with 'I' (interrupt) or 'U' (user context) and the
 * calling function's name. */
#define IL_DBG(level, fmt, args...)					\
do {									\
	if (il_get_debug_level(il) & level)				\
		dev_printk(KERN_ERR, &il->hw->wiphy->dev,		\
			   "%c %s " fmt, in_interrupt() ? 'I' : 'U',	\
			   __func__ , ## args);				\
} while (0)

/* Hex dump gated on the active debug level, at KERN_DEBUG */
#define il_print_hex_dump(il, level, p, len)				\
do {									\
	if (il_get_debug_level(il) & level)				\
		print_hex_dump(KERN_DEBUG, "iwl data: ",		\
			       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);	\
} while (0)

#else
/* Debugging compiled out: both helpers become no-ops */
#define IL_DBG(level, fmt, args...)
static inline void
il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
{
}
#endif /* CONFIG_IWLEGACY_DEBUG */

#ifdef CONFIG_IWLEGACY_DEBUGFS
int il_dbgfs_register(struct il_priv *il, const char *name);
void il_dbgfs_unregister(struct il_priv *il);
#else
/* debugfs compiled out: registration trivially succeeds */
static inline int
il_dbgfs_register(struct il_priv *il, const char *name)
{
	return 0;
}

static inline void
il_dbgfs_unregister(struct il_priv *il)
{
}
#endif /* CONFIG_IWLEGACY_DEBUGFS */
3330
/*
 * To use the debug system:
 *
 * If you are defining a new debug classification, simply add it to the #define
 * list here in the form of
 *
 * #define IL_DL_xxxx VALUE
 *
 * where xxxx should be the name of the classification (for example, WEP).
 *
 * You then need to either add a IL_xxxx_DEBUG() macro definition for your
 * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want
 * to send output to that classification.
 *
 * The active debug levels can be accessed via files
 *
 * /sys/module/iwl4965/parameters/debug
 * /sys/module/iwl3945/parameters/debug
 * /sys/class/net/wlan0/device/debug_level
 *
 * when CONFIG_IWLEGACY_DEBUG=y.
 */

/* 0x0000000F - 0x00000001 */
#define IL_DL_INFO (1 << 0)
#define IL_DL_MAC80211 (1 << 1)
#define IL_DL_HCMD (1 << 2)
#define IL_DL_STATE (1 << 3)
/* 0x000000F0 - 0x00000010 */
#define IL_DL_MACDUMP (1 << 4)
#define IL_DL_HCMD_DUMP (1 << 5)
#define IL_DL_EEPROM (1 << 6)
#define IL_DL_RADIO (1 << 7)
/* 0x00000F00 - 0x00000100 */
#define IL_DL_POWER (1 << 8)
#define IL_DL_TEMP (1 << 9)
#define IL_DL_NOTIF (1 << 10)
#define IL_DL_SCAN (1 << 11)
/* 0x0000F000 - 0x00001000 */
#define IL_DL_ASSOC (1 << 12)
#define IL_DL_DROP (1 << 13)
#define IL_DL_TXPOWER (1 << 14)
#define IL_DL_AP (1 << 15)
/* 0x000F0000 - 0x00010000 */
#define IL_DL_FW (1 << 16)
#define IL_DL_RF_KILL (1 << 17)
#define IL_DL_FW_ERRORS (1 << 18)
#define IL_DL_LED (1 << 19)
/* 0x00F00000 - 0x00100000 */
#define IL_DL_RATE (1 << 20)
#define IL_DL_CALIB (1 << 21)
#define IL_DL_WEP (1 << 22)
#define IL_DL_TX (1 << 23)
/* 0x0F000000 - 0x01000000 */
#define IL_DL_RX (1 << 24)
#define IL_DL_ISR (1 << 25)
#define IL_DL_HT (1 << 26)
/* bit 27 currently unused */
/* 0xF0000000 - 0x10000000 */
#define IL_DL_11H (1 << 28)
#define IL_DL_STATS (1 << 29)
#define IL_DL_TX_REPLY (1 << 30)
#define IL_DL_QOS (1 << 31)	/* NOTE(review): shifts into the sign bit of
				 * int; consider 1U << 31 if ever reworked */

/* Per-classification wrappers around IL_DBG() */
#define D_INFO(f, a...) IL_DBG(IL_DL_INFO, f, ## a)
#define D_MAC80211(f, a...) IL_DBG(IL_DL_MAC80211, f, ## a)
#define D_MACDUMP(f, a...) IL_DBG(IL_DL_MACDUMP, f, ## a)
#define D_TEMP(f, a...) IL_DBG(IL_DL_TEMP, f, ## a)
#define D_SCAN(f, a...) IL_DBG(IL_DL_SCAN, f, ## a)
#define D_RX(f, a...) IL_DBG(IL_DL_RX, f, ## a)
#define D_TX(f, a...) IL_DBG(IL_DL_TX, f, ## a)
#define D_ISR(f, a...) IL_DBG(IL_DL_ISR, f, ## a)
#define D_LED(f, a...) IL_DBG(IL_DL_LED, f, ## a)
#define D_WEP(f, a...) IL_DBG(IL_DL_WEP, f, ## a)
#define D_HC(f, a...) IL_DBG(IL_DL_HCMD, f, ## a)
#define D_HC_DUMP(f, a...) IL_DBG(IL_DL_HCMD_DUMP, f, ## a)
#define D_EEPROM(f, a...) IL_DBG(IL_DL_EEPROM, f, ## a)
#define D_CALIB(f, a...) IL_DBG(IL_DL_CALIB, f, ## a)
#define D_FW(f, a...) IL_DBG(IL_DL_FW, f, ## a)
#define D_RF_KILL(f, a...) IL_DBG(IL_DL_RF_KILL, f, ## a)
#define D_DROP(f, a...) IL_DBG(IL_DL_DROP, f, ## a)
#define D_AP(f, a...) IL_DBG(IL_DL_AP, f, ## a)
#define D_TXPOWER(f, a...) IL_DBG(IL_DL_TXPOWER, f, ## a)
#define D_RATE(f, a...) IL_DBG(IL_DL_RATE, f, ## a)
#define D_NOTIF(f, a...) IL_DBG(IL_DL_NOTIF, f, ## a)
#define D_ASSOC(f, a...) IL_DBG(IL_DL_ASSOC, f, ## a)
#define D_HT(f, a...) IL_DBG(IL_DL_HT, f, ## a)
#define D_STATS(f, a...) IL_DBG(IL_DL_STATS, f, ## a)
#define D_TX_REPLY(f, a...) IL_DBG(IL_DL_TX_REPLY, f, ## a)
#define D_QOS(f, a...) IL_DBG(IL_DL_QOS, f, ## a)
#define D_RADIO(f, a...) IL_DBG(IL_DL_RADIO, f, ## a)
#define D_POWER(f, a...) IL_DBG(IL_DL_POWER, f, ## a)
#define D_11H(f, a...) IL_DBG(IL_DL_11H, f, ## a)

#endif /* __il_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/csr.h
index 668a9616c269..9138e15004fa 100644
--- a/drivers/net/wireless/iwlegacy/iwl-csr.h
+++ b/drivers/net/wireless/iwlegacy/csr.h
@@ -60,8 +60,8 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#ifndef __iwl_legacy_csr_h__ 63#ifndef __il_csr_h__
64#define __iwl_legacy_csr_h__ 64#define __il_csr_h__
65/* 65/*
66 * CSR (control and status registers) 66 * CSR (control and status registers)
67 * 67 *
@@ -70,9 +70,9 @@
70 * low power states due to driver-invoked device resets 70 * low power states due to driver-invoked device resets
71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. 71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
72 * 72 *
73 * Use iwl_write32() and iwl_read32() family to access these registers; 73 * Use _il_wr() and _il_rd() family to access these registers;
74 * these provide simple PCI bus access, without waking up the MAC. 74 * these provide simple PCI bus access, without waking up the MAC.
75 * Do not use iwl_legacy_write_direct32() family for these registers; 75 * Do not use il_wr() family for these registers;
76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. 76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing 77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers. 78 * the CSR registers.
@@ -82,16 +82,16 @@
82 */ 82 */
83#define CSR_BASE (0x000) 83#define CSR_BASE (0x000)
84 84
85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ 85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ 86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ 87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ 88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ 89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */
90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ 90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ 91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */
92#define CSR_GP_CNTRL (CSR_BASE+0x024) 92#define CSR_GP_CNTRL (CSR_BASE+0x024)
93 93
94/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ 94/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) 95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
96 96
97/* 97/*
@@ -166,26 +166,26 @@
166 166
167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) 167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) 168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ 169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ 170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ 171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
172 172
173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ 173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */
174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ 174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */
175 175
176/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), 176/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
177 * acknowledged (reset) by host writing "1" to flagged bits. */ 177 * acknowledged (reset) by host writing "1" to flagged bits. */
178#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ 178#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
179#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ 179#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
180#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ 180#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
181#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ 181#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
182#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ 182#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
183#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ 183#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
184#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ 184#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
185#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ 185#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
186#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */ 186#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
187#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ 187#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
188#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ 188#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
189 189
190#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ 190#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
191 CSR_INT_BIT_HW_ERR | \ 191 CSR_INT_BIT_HW_ERR | \
@@ -197,21 +197,20 @@
197 CSR_INT_BIT_ALIVE) 197 CSR_INT_BIT_ALIVE)
198 198
199/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ 199/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
200#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ 200#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
201#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ 201#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
202#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */ 202#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
203#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ 203#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
204#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ 204#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
205#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */ 205#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
206#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ 206#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
207#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ 207#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
208 208
209#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ 209#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
210 CSR39_FH_INT_BIT_RX_CHNL2 | \ 210 CSR39_FH_INT_BIT_RX_CHNL2 | \
211 CSR_FH_INT_BIT_RX_CHNL1 | \ 211 CSR_FH_INT_BIT_RX_CHNL1 | \
212 CSR_FH_INT_BIT_RX_CHNL0) 212 CSR_FH_INT_BIT_RX_CHNL0)
213 213
214
215#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \ 214#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
216 CSR_FH_INT_BIT_TX_CHNL1 | \ 215 CSR_FH_INT_BIT_TX_CHNL1 | \
217 CSR_FH_INT_BIT_TX_CHNL0) 216 CSR_FH_INT_BIT_TX_CHNL0)
@@ -285,7 +284,6 @@
285#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) 284#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
286#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) 285#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
287 286
288
289/* EEPROM REG */ 287/* EEPROM REG */
290#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) 288#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
291#define CSR_EEPROM_REG_BIT_CMD (0x00000002) 289#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -293,19 +291,18 @@
293#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) 291#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
294 292
295/* EEPROM GP */ 293/* EEPROM GP */
296#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ 294#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
297#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) 295#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
298#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) 296#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
299#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) 297#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
300 298
301/* GP REG */ 299/* GP REG */
302#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ 300#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
303#define CSR_GP_REG_NO_POWER_SAVE (0x00000000) 301#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
304#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) 302#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
305#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) 303#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
306#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) 304#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
307 305
308
309/* CSR GIO */ 306/* CSR GIO */
310#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) 307#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
311 308
@@ -357,7 +354,7 @@
357/* HPET MEM debug */ 354/* HPET MEM debug */
358#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) 355#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
359 356
360/* DRAM INT TABLE */ 357/* DRAM INT TBL */
361#define CSR_DRAM_INT_TBL_ENABLE (1 << 31) 358#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
362#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) 359#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
363 360
@@ -368,13 +365,13 @@
368 * to indirectly access device's internal memory or registers that 365 * to indirectly access device's internal memory or registers that
369 * may be powered-down. 366 * may be powered-down.
370 * 367 *
371 * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family 368 * Use il_wr()/il_rd() family
372 * for these registers; 369 * for these registers;
373 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ 370 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
374 * to make sure the MAC (uCode processor, etc.) is powered up for accessing 371 * to make sure the MAC (uCode processor, etc.) is powered up for accessing
375 * internal resources. 372 * internal resources.
376 * 373 *
377 * Do not use iwl_write32()/iwl_read32() family to access these registers; 374 * Do not use _il_wr()/_il_rd() family to access these registers;
378 * these provide only simple PCI bus access, without waking up the MAC. 375 * these provide only simple PCI bus access, without waking up the MAC.
379 */ 376 */
380#define HBUS_BASE (0x400) 377#define HBUS_BASE (0x400)
@@ -411,12 +408,12 @@
411#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) 408#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
412 409
413/* 410/*
414 * Per-Tx-queue write pointer (index, really!) 411 * Per-Tx-queue write pointer (idx, really!)
415 * Indicates index to next TFD that driver will fill (1 past latest filled). 412 * Indicates idx to next TFD that driver will fill (1 past latest filled).
416 * Bit usage: 413 * Bit usage:
417 * 0-7: queue write index 414 * 0-7: queue write idx
418 * 11-8: queue selector 415 * 11-8: queue selector
419 */ 416 */
420#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) 417#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
421 418
422#endif /* !__iwl_legacy_csr_h__ */ 419#endif /* !__il_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
new file mode 100644
index 000000000000..b1b8926a9c7b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -0,0 +1,1411 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <linux/export.h>
30#include <net/mac80211.h>
31
32#include "common.h"
33
34/* create and remove of files */
/* Create a debugfs file bound to this il; on failure jump to the
 * caller's local "err" label. */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, il,		\
				 &il_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* Create a boolean debugfs entry; jump to "err" on failure. */
#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {			\
	struct dentry *__d;						\
	__d = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,		\
				  parent, ptr);				\
	if (IS_ERR(__d) || !__d)					\
		goto err;						\
} while (0)

/* Create a hex u32 debugfs entry; jump to "err" on failure. */
#define DEBUGFS_ADD_X32(name, parent, ptr) do {				\
	struct dentry *__d;						\
	__d = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,		\
				 parent, ptr);				\
	if (IS_ERR(__d) || !__d)					\
		goto err;						\
} while (0)

/* Forward declarations for the per-file read/write handlers. */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t il_dbgfs_##name##_read(struct file *file,		\
				      char __user *user_buf,		\
				      size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t il_dbgfs_##name##_write(struct file *file,		\
				       const char __user *user_buf,	\
				       size_t count, loff_t *ppos);
67
68static int
69il_dbgfs_open_file_generic(struct inode *inode, struct file *file)
70{
71 file->private_data = inode->i_private;
72 return 0;
73}
74
/* Define file_operations for read-only, write-only and read-write
 * debugfs entries; all of them share the generic open handler. */
#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations il_dbgfs_##name##_ops = {		\
	.read = il_dbgfs_##name##_read,					\
	.open = il_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations il_dbgfs_##name##_ops = {		\
	.write = il_dbgfs_##name##_write,				\
	.open = il_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations il_dbgfs_##name##_ops = {		\
	.write = il_dbgfs_##name##_write,				\
	.read = il_dbgfs_##name##_read,					\
	.open = il_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};
100
101static ssize_t
102il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
103 loff_t *ppos)
104{
105
106 struct il_priv *il = file->private_data;
107 char *buf;
108 int pos = 0;
109
110 int cnt;
111 ssize_t ret;
112 const size_t bufsz =
113 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
114 buf = kzalloc(bufsz, GFP_KERNEL);
115 if (!buf)
116 return -ENOMEM;
117 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
118 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
119 pos +=
120 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
121 il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]);
122 }
123 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
124 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
125 pos +=
126 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
127 il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]);
128 }
129 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
130 pos +=
131 scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
132 il->tx_stats.data_cnt);
133 pos +=
134 scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
135 il->tx_stats.data_bytes);
136 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
137 kfree(buf);
138 return ret;
139}
140
141static ssize_t
142il_dbgfs_clear_traffic_stats_write(struct file *file,
143 const char __user *user_buf, size_t count,
144 loff_t *ppos)
145{
146 struct il_priv *il = file->private_data;
147 u32 clear_flag;
148 char buf[8];
149 int buf_size;
150
151 memset(buf, 0, sizeof(buf));
152 buf_size = min(count, sizeof(buf) - 1);
153 if (copy_from_user(buf, user_buf, buf_size))
154 return -EFAULT;
155 if (sscanf(buf, "%x", &clear_flag) != 1)
156 return -EFAULT;
157 il_clear_traffic_stats(il);
158
159 return count;
160}
161
162static ssize_t
163il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count,
164 loff_t *ppos)
165{
166
167 struct il_priv *il = file->private_data;
168 char *buf;
169 int pos = 0;
170 int cnt;
171 ssize_t ret;
172 const size_t bufsz =
173 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
174 buf = kzalloc(bufsz, GFP_KERNEL);
175 if (!buf)
176 return -ENOMEM;
177
178 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
179 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
180 pos +=
181 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
182 il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]);
183 }
184 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
185 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
186 pos +=
187 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
188 il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]);
189 }
190 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
191 pos +=
192 scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
193 il->rx_stats.data_cnt);
194 pos +=
195 scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
196 il->rx_stats.data_bytes);
197
198 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
199 kfree(buf);
200 return ret;
201}
202
203#define BYTE1_MASK 0x000000ff;
204#define BYTE2_MASK 0x0000ffff;
205#define BYTE3_MASK 0x00ffffff;
206static ssize_t
207il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count,
208 loff_t *ppos)
209{
210 u32 val;
211 char *buf;
212 ssize_t ret;
213 int i;
214 int pos = 0;
215 struct il_priv *il = file->private_data;
216 size_t bufsz;
217
218 /* default is to dump the entire data segment */
219 if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) {
220 il->dbgfs_sram_offset = 0x800000;
221 if (il->ucode_type == UCODE_INIT)
222 il->dbgfs_sram_len = il->ucode_init_data.len;
223 else
224 il->dbgfs_sram_len = il->ucode_data.len;
225 }
226 bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10;
227 buf = kmalloc(bufsz, GFP_KERNEL);
228 if (!buf)
229 return -ENOMEM;
230 pos +=
231 scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
232 il->dbgfs_sram_len);
233 pos +=
234 scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
235 il->dbgfs_sram_offset);
236 for (i = il->dbgfs_sram_len; i > 0; i -= 4) {
237 val =
238 il_read_targ_mem(il,
239 il->dbgfs_sram_offset +
240 il->dbgfs_sram_len - i);
241 if (i < 4) {
242 switch (i) {
243 case 1:
244 val &= BYTE1_MASK;
245 break;
246 case 2:
247 val &= BYTE2_MASK;
248 break;
249 case 3:
250 val &= BYTE3_MASK;
251 break;
252 }
253 }
254 if (!(i % 16))
255 pos += scnprintf(buf + pos, bufsz - pos, "\n");
256 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
257 }
258 pos += scnprintf(buf + pos, bufsz - pos, "\n");
259
260 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
261 kfree(buf);
262 return ret;
263}
264
265static ssize_t
266il_dbgfs_sram_write(struct file *file, const char __user *user_buf,
267 size_t count, loff_t *ppos)
268{
269 struct il_priv *il = file->private_data;
270 char buf[64];
271 int buf_size;
272 u32 offset, len;
273
274 memset(buf, 0, sizeof(buf));
275 buf_size = min(count, sizeof(buf) - 1);
276 if (copy_from_user(buf, user_buf, buf_size))
277 return -EFAULT;
278
279 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
280 il->dbgfs_sram_offset = offset;
281 il->dbgfs_sram_len = len;
282 } else {
283 il->dbgfs_sram_offset = 0;
284 il->dbgfs_sram_len = 0;
285 }
286
287 return count;
288}
289
290static ssize_t
291il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count,
292 loff_t *ppos)
293{
294 struct il_priv *il = file->private_data;
295 struct il_station_entry *station;
296 int max_sta = il->hw_params.max_stations;
297 char *buf;
298 int i, j, pos = 0;
299 ssize_t ret;
300 /* Add 30 for initial string */
301 const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations);
302
303 buf = kmalloc(bufsz, GFP_KERNEL);
304 if (!buf)
305 return -ENOMEM;
306
307 pos +=
308 scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
309 il->num_stations);
310
311 for (i = 0; i < max_sta; i++) {
312 station = &il->stations[i];
313 if (!station->used)
314 continue;
315 pos +=
316 scnprintf(buf + pos, bufsz - pos,
317 "station %d - addr: %pM, flags: %#x\n", i,
318 station->sta.sta.addr,
319 station->sta.station_flags_msk);
320 pos +=
321 scnprintf(buf + pos, bufsz - pos,
322 "TID\tseq_num\ttxq_id\tframes\ttfds\t");
323 pos +=
324 scnprintf(buf + pos, bufsz - pos,
325 "start_idx\tbitmap\t\t\trate_n_flags\n");
326
327 for (j = 0; j < MAX_TID_COUNT; j++) {
328 pos +=
329 scnprintf(buf + pos, bufsz - pos,
330 "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
331 j, station->tid[j].seq_number,
332 station->tid[j].agg.txq_id,
333 station->tid[j].agg.frame_count,
334 station->tid[j].tfds_in_queue,
335 station->tid[j].agg.start_idx,
336 station->tid[j].agg.bitmap,
337 station->tid[j].agg.rate_n_flags);
338
339 if (station->tid[j].agg.wait_for_ba)
340 pos +=
341 scnprintf(buf + pos, bufsz - pos,
342 " - waitforba");
343 pos += scnprintf(buf + pos, bufsz - pos, "\n");
344 }
345
346 pos += scnprintf(buf + pos, bufsz - pos, "\n");
347 }
348
349 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
350 kfree(buf);
351 return ret;
352}
353
354static ssize_t
355il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
356 loff_t *ppos)
357{
358 ssize_t ret;
359 struct il_priv *il = file->private_data;
360 int pos = 0, ofs = 0, buf_size = 0;
361 const u8 *ptr;
362 char *buf;
363 u16 eeprom_ver;
364 size_t eeprom_len = il->cfg->base_params->eeprom_size;
365 buf_size = 4 * eeprom_len + 256;
366
367 if (eeprom_len % 16) {
368 IL_ERR("NVM size is not multiple of 16.\n");
369 return -ENODATA;
370 }
371
372 ptr = il->eeprom;
373 if (!ptr) {
374 IL_ERR("Invalid EEPROM memory\n");
375 return -ENOMEM;
376 }
377
378 /* 4 characters for byte 0xYY */
379 buf = kzalloc(buf_size, GFP_KERNEL);
380 if (!buf) {
381 IL_ERR("Can not allocate Buffer\n");
382 return -ENOMEM;
383 }
384 eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
385 pos +=
386 scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
387 eeprom_ver);
388 for (ofs = 0; ofs < eeprom_len; ofs += 16) {
389 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
390 hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
391 buf_size - pos, 0);
392 pos += strlen(buf + pos);
393 if (buf_size - pos > 0)
394 buf[pos++] = '\n';
395 }
396
397 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
398 kfree(buf);
399 return ret;
400}
401
402static ssize_t
403il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
404 loff_t *ppos)
405{
406 struct il_priv *il = file->private_data;
407 struct ieee80211_channel *channels = NULL;
408 const struct ieee80211_supported_band *supp_band = NULL;
409 int pos = 0, i, bufsz = PAGE_SIZE;
410 char *buf;
411 ssize_t ret;
412
413 if (!test_bit(S_GEO_CONFIGURED, &il->status))
414 return -EAGAIN;
415
416 buf = kzalloc(bufsz, GFP_KERNEL);
417 if (!buf) {
418 IL_ERR("Can not allocate Buffer\n");
419 return -ENOMEM;
420 }
421
422 supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
423 if (supp_band) {
424 channels = supp_band->channels;
425
426 pos +=
427 scnprintf(buf + pos, bufsz - pos,
428 "Displaying %d channels in 2.4GHz band 802.11bg):\n",
429 supp_band->n_channels);
430
431 for (i = 0; i < supp_band->n_channels; i++)
432 pos +=
433 scnprintf(buf + pos, bufsz - pos,
434 "%d: %ddBm: BSS%s%s, %s.\n",
435 channels[i].hw_value,
436 channels[i].max_power,
437 channels[i].
438 flags & IEEE80211_CHAN_RADAR ?
439 " (IEEE 802.11h required)" : "",
440 ((channels[i].
441 flags & IEEE80211_CHAN_NO_IBSS) ||
442 (channels[i].
443 flags & IEEE80211_CHAN_RADAR)) ? "" :
444 ", IBSS",
445 channels[i].
446 flags & IEEE80211_CHAN_PASSIVE_SCAN ?
447 "passive only" : "active/passive");
448 }
449 supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
450 if (supp_band) {
451 channels = supp_band->channels;
452
453 pos +=
454 scnprintf(buf + pos, bufsz - pos,
455 "Displaying %d channels in 5.2GHz band (802.11a)\n",
456 supp_band->n_channels);
457
458 for (i = 0; i < supp_band->n_channels; i++)
459 pos +=
460 scnprintf(buf + pos, bufsz - pos,
461 "%d: %ddBm: BSS%s%s, %s.\n",
462 channels[i].hw_value,
463 channels[i].max_power,
464 channels[i].
465 flags & IEEE80211_CHAN_RADAR ?
466 " (IEEE 802.11h required)" : "",
467 ((channels[i].
468 flags & IEEE80211_CHAN_NO_IBSS) ||
469 (channels[i].
470 flags & IEEE80211_CHAN_RADAR)) ? "" :
471 ", IBSS",
472 channels[i].
473 flags & IEEE80211_CHAN_PASSIVE_SCAN ?
474 "passive only" : "active/passive");
475 }
476 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
477 kfree(buf);
478 return ret;
479}
480
481static ssize_t
482il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
483 loff_t *ppos)
484{
485
486 struct il_priv *il = file->private_data;
487 char buf[512];
488 int pos = 0;
489 const size_t bufsz = sizeof(buf);
490
491 pos +=
492 scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n",
493 test_bit(S_HCMD_ACTIVE, &il->status));
494 pos +=
495 scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
496 test_bit(S_INT_ENABLED, &il->status));
497 pos +=
498 scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n",
499 test_bit(S_RF_KILL_HW, &il->status));
500 pos +=
501 scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
502 test_bit(S_CT_KILL, &il->status));
503 pos +=
504 scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n",
505 test_bit(S_INIT, &il->status));
506 pos +=
507 scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n",
508 test_bit(S_ALIVE, &il->status));
509 pos +=
510 scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n",
511 test_bit(S_READY, &il->status));
512 pos +=
513 scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n",
514 test_bit(S_TEMPERATURE, &il->status));
515 pos +=
516 scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n",
517 test_bit(S_GEO_CONFIGURED, &il->status));
518 pos +=
519 scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n",
520 test_bit(S_EXIT_PENDING, &il->status));
521 pos +=
522 scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n",
523 test_bit(S_STATS, &il->status));
524 pos +=
525 scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n",
526 test_bit(S_SCANNING, &il->status));
527 pos +=
528 scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n",
529 test_bit(S_SCAN_ABORTING, &il->status));
530 pos +=
531 scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n",
532 test_bit(S_SCAN_HW, &il->status));
533 pos +=
534 scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n",
535 test_bit(S_POWER_PMI, &il->status));
536 pos +=
537 scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n",
538 test_bit(S_FW_ERROR, &il->status));
539 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
540}
541
542static ssize_t
543il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
544 loff_t *ppos)
545{
546
547 struct il_priv *il = file->private_data;
548 int pos = 0;
549 int cnt = 0;
550 char *buf;
551 int bufsz = 24 * 64; /* 24 items * 64 char per item */
552 ssize_t ret;
553
554 buf = kzalloc(bufsz, GFP_KERNEL);
555 if (!buf) {
556 IL_ERR("Can not allocate Buffer\n");
557 return -ENOMEM;
558 }
559
560 pos +=
561 scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n");
562
563 pos +=
564 scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
565 il->isr_stats.hw);
566 pos +=
567 scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
568 il->isr_stats.sw);
569 if (il->isr_stats.sw || il->isr_stats.hw) {
570 pos +=
571 scnprintf(buf + pos, bufsz - pos,
572 "\tLast Restarting Code: 0x%X\n",
573 il->isr_stats.err_code);
574 }
575#ifdef CONFIG_IWLEGACY_DEBUG
576 pos +=
577 scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
578 il->isr_stats.sch);
579 pos +=
580 scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
581 il->isr_stats.alive);
582#endif
583 pos +=
584 scnprintf(buf + pos, bufsz - pos,
585 "HW RF KILL switch toggled:\t %u\n",
586 il->isr_stats.rfkill);
587
588 pos +=
589 scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
590 il->isr_stats.ctkill);
591
592 pos +=
593 scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
594 il->isr_stats.wakeup);
595
596 pos +=
597 scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n",
598 il->isr_stats.rx);
599 for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
600 if (il->isr_stats.handlers[cnt] > 0)
601 pos +=
602 scnprintf(buf + pos, bufsz - pos,
603 "\tRx handler[%36s]:\t\t %u\n",
604 il_get_cmd_string(cnt),
605 il->isr_stats.handlers[cnt]);
606 }
607
608 pos +=
609 scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
610 il->isr_stats.tx);
611
612 pos +=
613 scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
614 il->isr_stats.unhandled);
615
616 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
617 kfree(buf);
618 return ret;
619}
620
621static ssize_t
622il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf,
623 size_t count, loff_t *ppos)
624{
625 struct il_priv *il = file->private_data;
626 char buf[8];
627 int buf_size;
628 u32 reset_flag;
629
630 memset(buf, 0, sizeof(buf));
631 buf_size = min(count, sizeof(buf) - 1);
632 if (copy_from_user(buf, user_buf, buf_size))
633 return -EFAULT;
634 if (sscanf(buf, "%x", &reset_flag) != 1)
635 return -EFAULT;
636 if (reset_flag == 0)
637 il_clear_isr_stats(il);
638
639 return count;
640}
641
642static ssize_t
643il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
644 loff_t *ppos)
645{
646 struct il_priv *il = file->private_data;
647 struct il_rxon_context *ctx = &il->ctx;
648 int pos = 0, i;
649 char buf[256];
650 const size_t bufsz = sizeof(buf);
651
652 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid);
653 for (i = 0; i < AC_NUM; i++) {
654 pos +=
655 scnprintf(buf + pos, bufsz - pos,
656 "\tcw_min\tcw_max\taifsn\ttxop\n");
657 pos +=
658 scnprintf(buf + pos, bufsz - pos,
659 "AC[%d]\t%u\t%u\t%u\t%u\n", i,
660 ctx->qos_data.def_qos_parm.ac[i].cw_min,
661 ctx->qos_data.def_qos_parm.ac[i].cw_max,
662 ctx->qos_data.def_qos_parm.ac[i].aifsn,
663 ctx->qos_data.def_qos_parm.ac[i].edca_txop);
664 }
665
666 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
667}
668
669static ssize_t
670il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf,
671 size_t count, loff_t *ppos)
672{
673 struct il_priv *il = file->private_data;
674 char buf[8];
675 int buf_size;
676 int ht40;
677
678 memset(buf, 0, sizeof(buf));
679 buf_size = min(count, sizeof(buf) - 1);
680 if (copy_from_user(buf, user_buf, buf_size))
681 return -EFAULT;
682 if (sscanf(buf, "%d", &ht40) != 1)
683 return -EFAULT;
684 if (!il_is_any_associated(il))
685 il->disable_ht40 = ht40 ? true : false;
686 else {
687 IL_ERR("Sta associated with AP - "
688 "Change to 40MHz channel support is not allowed\n");
689 return -EINVAL;
690 }
691
692 return count;
693}
694
695static ssize_t
696il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf,
697 size_t count, loff_t *ppos)
698{
699 struct il_priv *il = file->private_data;
700 char buf[100];
701 int pos = 0;
702 const size_t bufsz = sizeof(buf);
703
704 pos +=
705 scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n",
706 il->disable_ht40 ? "Disabled" : "Enabled");
707 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
708}
709
/* file_operations instances for the first group of debugfs entries */
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(qos);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
718
719static ssize_t
720il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf,
721 size_t count, loff_t *ppos)
722{
723 struct il_priv *il = file->private_data;
724 int pos = 0, ofs = 0;
725 int cnt = 0, entry;
726 struct il_tx_queue *txq;
727 struct il_queue *q;
728 struct il_rx_queue *rxq = &il->rxq;
729 char *buf;
730 int bufsz =
731 ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
732 (il->cfg->base_params->num_of_queues * 32 * 8) + 400;
733 const u8 *ptr;
734 ssize_t ret;
735
736 if (!il->txq) {
737 IL_ERR("txq not ready\n");
738 return -EAGAIN;
739 }
740 buf = kzalloc(bufsz, GFP_KERNEL);
741 if (!buf) {
742 IL_ERR("Can not allocate buffer\n");
743 return -ENOMEM;
744 }
745 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
746 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
747 txq = &il->txq[cnt];
748 q = &txq->q;
749 pos +=
750 scnprintf(buf + pos, bufsz - pos,
751 "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt,
752 q->read_ptr, q->write_ptr);
753 }
754 if (il->tx_traffic && (il_debug_level & IL_DL_TX)) {
755 ptr = il->tx_traffic;
756 pos +=
757 scnprintf(buf + pos, bufsz - pos, "Tx Traffic idx: %u\n",
758 il->tx_traffic_idx);
759 for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
760 for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
761 entry++, ofs += 16) {
762 pos +=
763 scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
764 ofs);
765 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
766 buf + pos, bufsz - pos, 0);
767 pos += strlen(buf + pos);
768 if (bufsz - pos > 0)
769 buf[pos++] = '\n';
770 }
771 }
772 }
773
774 pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
775 pos +=
776 scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n",
777 rxq->read, rxq->write);
778
779 if (il->rx_traffic && (il_debug_level & IL_DL_RX)) {
780 ptr = il->rx_traffic;
781 pos +=
782 scnprintf(buf + pos, bufsz - pos, "Rx Traffic idx: %u\n",
783 il->rx_traffic_idx);
784 for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
785 for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
786 entry++, ofs += 16) {
787 pos +=
788 scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
789 ofs);
790 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
791 buf + pos, bufsz - pos, 0);
792 pos += strlen(buf + pos);
793 if (bufsz - pos > 0)
794 buf[pos++] = '\n';
795 }
796 }
797 }
798
799 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
800 kfree(buf);
801 return ret;
802}
803
804static ssize_t
805il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf,
806 size_t count, loff_t *ppos)
807{
808 struct il_priv *il = file->private_data;
809 char buf[8];
810 int buf_size;
811 int traffic_log;
812
813 memset(buf, 0, sizeof(buf));
814 buf_size = min(count, sizeof(buf) - 1);
815 if (copy_from_user(buf, user_buf, buf_size))
816 return -EFAULT;
817 if (sscanf(buf, "%d", &traffic_log) != 1)
818 return -EFAULT;
819 if (traffic_log == 0)
820 il_reset_traffic_log(il);
821
822 return count;
823}
824
825static ssize_t
826il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
827 loff_t *ppos)
828{
829
830 struct il_priv *il = file->private_data;
831 struct il_tx_queue *txq;
832 struct il_queue *q;
833 char *buf;
834 int pos = 0;
835 int cnt;
836 int ret;
837 const size_t bufsz =
838 sizeof(char) * 64 * il->cfg->base_params->num_of_queues;
839
840 if (!il->txq) {
841 IL_ERR("txq not ready\n");
842 return -EAGAIN;
843 }
844 buf = kzalloc(bufsz, GFP_KERNEL);
845 if (!buf)
846 return -ENOMEM;
847
848 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
849 txq = &il->txq[cnt];
850 q = &txq->q;
851 pos +=
852 scnprintf(buf + pos, bufsz - pos,
853 "hwq %.2d: read=%u write=%u stop=%d"
854 " swq_id=%#.2x (ac %d/hwq %d)\n", cnt,
855 q->read_ptr, q->write_ptr,
856 !!test_bit(cnt, il->queue_stopped),
857 txq->swq_id, txq->swq_id & 3,
858 (txq->swq_id >> 2) & 0x1f);
859 if (cnt >= 4)
860 continue;
861 /* for the ACs, display the stop count too */
862 pos +=
863 scnprintf(buf + pos, bufsz - pos,
864 " stop-count: %d\n",
865 atomic_read(&il->queue_stop_count[cnt]));
866 }
867 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
868 kfree(buf);
869 return ret;
870}
871
872static ssize_t
873il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count,
874 loff_t *ppos)
875{
876
877 struct il_priv *il = file->private_data;
878 struct il_rx_queue *rxq = &il->rxq;
879 char buf[256];
880 int pos = 0;
881 const size_t bufsz = sizeof(buf);
882
883 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
884 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
885 pos +=
886 scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
887 rxq->free_count);
888 if (rxq->rb_stts) {
889 pos +=
890 scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
891 le16_to_cpu(rxq->rb_stts->
892 closed_rb_num) & 0x0FFF);
893 } else {
894 pos +=
895 scnprintf(buf + pos, bufsz - pos,
896 "closed_rb_num: Not Allocated\n");
897 }
898 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
899}
900
901static ssize_t
902il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
903 size_t count, loff_t *ppos)
904{
905 struct il_priv *il = file->private_data;
906 return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf,
907 count, ppos);
908}
909
910static ssize_t
911il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
912 size_t count, loff_t *ppos)
913{
914 struct il_priv *il = file->private_data;
915 return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf,
916 count, ppos);
917}
918
919static ssize_t
920il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
921 size_t count, loff_t *ppos)
922{
923 struct il_priv *il = file->private_data;
924 return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf,
925 count, ppos);
926}
927
928static ssize_t
929il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf,
930 size_t count, loff_t *ppos)
931{
932
933 struct il_priv *il = file->private_data;
934 int pos = 0;
935 int cnt = 0;
936 char *buf;
937 int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100;
938 ssize_t ret;
939 struct il_sensitivity_data *data;
940
941 data = &il->sensitivity_data;
942 buf = kzalloc(bufsz, GFP_KERNEL);
943 if (!buf) {
944 IL_ERR("Can not allocate Buffer\n");
945 return -ENOMEM;
946 }
947
948 pos +=
949 scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
950 data->auto_corr_ofdm);
951 pos +=
952 scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n",
953 data->auto_corr_ofdm_mrc);
954 pos +=
955 scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
956 data->auto_corr_ofdm_x1);
957 pos +=
958 scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n",
959 data->auto_corr_ofdm_mrc_x1);
960 pos +=
961 scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
962 data->auto_corr_cck);
963 pos +=
964 scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
965 data->auto_corr_cck_mrc);
966 pos +=
967 scnprintf(buf + pos, bufsz - pos,
968 "last_bad_plcp_cnt_ofdm:\t\t %u\n",
969 data->last_bad_plcp_cnt_ofdm);
970 pos +=
971 scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
972 data->last_fa_cnt_ofdm);
973 pos +=
974 scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n",
975 data->last_bad_plcp_cnt_cck);
976 pos +=
977 scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
978 data->last_fa_cnt_cck);
979 pos +=
980 scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
981 data->nrg_curr_state);
982 pos +=
983 scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
984 data->nrg_prev_state);
985 pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
986 for (cnt = 0; cnt < 10; cnt++) {
987 pos +=
988 scnprintf(buf + pos, bufsz - pos, " %u",
989 data->nrg_value[cnt]);
990 }
991 pos += scnprintf(buf + pos, bufsz - pos, "\n");
992 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
993 for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
994 pos +=
995 scnprintf(buf + pos, bufsz - pos, " %u",
996 data->nrg_silence_rssi[cnt]);
997 }
998 pos += scnprintf(buf + pos, bufsz - pos, "\n");
999 pos +=
1000 scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
1001 data->nrg_silence_ref);
1002 pos +=
1003 scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
1004 data->nrg_energy_idx);
1005 pos +=
1006 scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
1007 data->nrg_silence_idx);
1008 pos +=
1009 scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
1010 data->nrg_th_cck);
1011 pos +=
1012 scnprintf(buf + pos, bufsz - pos,
1013 "nrg_auto_corr_silence_diff:\t %u\n",
1014 data->nrg_auto_corr_silence_diff);
1015 pos +=
1016 scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
1017 data->num_in_cck_no_fa);
1018 pos +=
1019 scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
1020 data->nrg_th_ofdm);
1021
1022 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1023 kfree(buf);
1024 return ret;
1025}
1026
1027static ssize_t
1028il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf,
1029 size_t count, loff_t *ppos)
1030{
1031
1032 struct il_priv *il = file->private_data;
1033 int pos = 0;
1034 int cnt = 0;
1035 char *buf;
1036 int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100;
1037 ssize_t ret;
1038 struct il_chain_noise_data *data;
1039
1040 data = &il->chain_noise_data;
1041 buf = kzalloc(bufsz, GFP_KERNEL);
1042 if (!buf) {
1043 IL_ERR("Can not allocate Buffer\n");
1044 return -ENOMEM;
1045 }
1046
1047 pos +=
1048 scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
1049 data->active_chains);
1050 pos +=
1051 scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
1052 data->chain_noise_a);
1053 pos +=
1054 scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
1055 data->chain_noise_b);
1056 pos +=
1057 scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
1058 data->chain_noise_c);
1059 pos +=
1060 scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
1061 data->chain_signal_a);
1062 pos +=
1063 scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
1064 data->chain_signal_b);
1065 pos +=
1066 scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
1067 data->chain_signal_c);
1068 pos +=
1069 scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
1070 data->beacon_count);
1071
1072 pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
1073 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
1074 pos +=
1075 scnprintf(buf + pos, bufsz - pos, " %u",
1076 data->disconn_array[cnt]);
1077 }
1078 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1079 pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
1080 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
1081 pos +=
1082 scnprintf(buf + pos, bufsz - pos, " %u",
1083 data->delta_gain_code[cnt]);
1084 }
1085 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1086 pos +=
1087 scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
1088 data->radio_write);
1089 pos +=
1090 scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
1091 data->state);
1092
1093 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1094 kfree(buf);
1095 return ret;
1096}
1097
1098static ssize_t
1099il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf,
1100 size_t count, loff_t *ppos)
1101{
1102 struct il_priv *il = file->private_data;
1103 char buf[60];
1104 int pos = 0;
1105 const size_t bufsz = sizeof(buf);
1106 u32 pwrsave_status;
1107
1108 pwrsave_status =
1109 _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1110
1111 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1112 pos +=
1113 scnprintf(buf + pos, bufsz - pos, "%s\n",
1114 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1115 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1116 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1117 "error");
1118
1119 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1120}
1121
1122static ssize_t
1123il_dbgfs_clear_ucode_stats_write(struct file *file,
1124 const char __user *user_buf, size_t count,
1125 loff_t *ppos)
1126{
1127 struct il_priv *il = file->private_data;
1128 char buf[8];
1129 int buf_size;
1130 int clear;
1131
1132 memset(buf, 0, sizeof(buf));
1133 buf_size = min(count, sizeof(buf) - 1);
1134 if (copy_from_user(buf, user_buf, buf_size))
1135 return -EFAULT;
1136 if (sscanf(buf, "%d", &clear) != 1)
1137 return -EFAULT;
1138
1139 /* make request to uCode to retrieve stats information */
1140 mutex_lock(&il->mutex);
1141 il_send_stats_request(il, CMD_SYNC, true);
1142 mutex_unlock(&il->mutex);
1143
1144 return count;
1145}
1146
1147static ssize_t
1148il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
1149 size_t count, loff_t *ppos)
1150{
1151
1152 struct il_priv *il = file->private_data;
1153 int len = 0;
1154 char buf[20];
1155
1156 len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags));
1157 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1158}
1159
1160static ssize_t
1161il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
1162 size_t count, loff_t *ppos)
1163{
1164
1165 struct il_priv *il = file->private_data;
1166 int len = 0;
1167 char buf[20];
1168
1169 len =
1170 sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags));
1171 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1172}
1173
1174static ssize_t
1175il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
1176 loff_t *ppos)
1177{
1178 struct il_priv *il = file->private_data;
1179 char *buf;
1180 int pos = 0;
1181 ssize_t ret = -EFAULT;
1182
1183 if (il->cfg->ops->lib->dump_fh) {
1184 ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true);
1185 if (buf) {
1186 ret =
1187 simple_read_from_buffer(user_buf, count, ppos, buf,
1188 pos);
1189 kfree(buf);
1190 }
1191 }
1192
1193 return ret;
1194}
1195
1196static ssize_t
1197il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf,
1198 size_t count, loff_t *ppos)
1199{
1200
1201 struct il_priv *il = file->private_data;
1202 int pos = 0;
1203 char buf[12];
1204 const size_t bufsz = sizeof(buf);
1205
1206 pos +=
1207 scnprintf(buf + pos, bufsz - pos, "%d\n",
1208 il->missed_beacon_threshold);
1209
1210 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1211}
1212
1213static ssize_t
1214il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf,
1215 size_t count, loff_t *ppos)
1216{
1217 struct il_priv *il = file->private_data;
1218 char buf[8];
1219 int buf_size;
1220 int missed;
1221
1222 memset(buf, 0, sizeof(buf));
1223 buf_size = min(count, sizeof(buf) - 1);
1224 if (copy_from_user(buf, user_buf, buf_size))
1225 return -EFAULT;
1226 if (sscanf(buf, "%d", &missed) != 1)
1227 return -EINVAL;
1228
1229 if (missed < IL_MISSED_BEACON_THRESHOLD_MIN ||
1230 missed > IL_MISSED_BEACON_THRESHOLD_MAX)
1231 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
1232 else
1233 il->missed_beacon_threshold = missed;
1234
1235 return count;
1236}
1237
1238static ssize_t
1239il_dbgfs_force_reset_read(struct file *file, char __user *user_buf,
1240 size_t count, loff_t *ppos)
1241{
1242
1243 struct il_priv *il = file->private_data;
1244 int pos = 0;
1245 char buf[300];
1246 const size_t bufsz = sizeof(buf);
1247 struct il_force_reset *force_reset;
1248
1249 force_reset = &il->force_reset;
1250
1251 pos +=
1252 scnprintf(buf + pos, bufsz - pos, "\tnumber of reset request: %d\n",
1253 force_reset->reset_request_count);
1254 pos +=
1255 scnprintf(buf + pos, bufsz - pos,
1256 "\tnumber of reset request success: %d\n",
1257 force_reset->reset_success_count);
1258 pos +=
1259 scnprintf(buf + pos, bufsz - pos,
1260 "\tnumber of reset request reject: %d\n",
1261 force_reset->reset_reject_count);
1262 pos +=
1263 scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n",
1264 force_reset->reset_duration);
1265
1266 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1267}
1268
1269static ssize_t
1270il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf,
1271 size_t count, loff_t *ppos)
1272{
1273
1274 int ret;
1275 struct il_priv *il = file->private_data;
1276
1277 ret = il_force_reset(il, true);
1278
1279 return ret ? ret : count;
1280}
1281
1282static ssize_t
1283il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
1284 size_t count, loff_t *ppos)
1285{
1286
1287 struct il_priv *il = file->private_data;
1288 char buf[8];
1289 int buf_size;
1290 int timeout;
1291
1292 memset(buf, 0, sizeof(buf));
1293 buf_size = min(count, sizeof(buf) - 1);
1294 if (copy_from_user(buf, user_buf, buf_size))
1295 return -EFAULT;
1296 if (sscanf(buf, "%d", &timeout) != 1)
1297 return -EINVAL;
1298 if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
1299 timeout = IL_DEF_WD_TIMEOUT;
1300
1301 il->cfg->base_params->wd_timeout = timeout;
1302 il_setup_watchdog(il);
1303 return count;
1304}
1305
1306DEBUGFS_READ_FILE_OPS(rx_stats);
1307DEBUGFS_READ_FILE_OPS(tx_stats);
1308DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
1309DEBUGFS_READ_FILE_OPS(rx_queue);
1310DEBUGFS_READ_FILE_OPS(tx_queue);
1311DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
1312DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
1313DEBUGFS_READ_FILE_OPS(ucode_general_stats);
1314DEBUGFS_READ_FILE_OPS(sensitivity);
1315DEBUGFS_READ_FILE_OPS(chain_noise);
1316DEBUGFS_READ_FILE_OPS(power_save_status);
1317DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats);
1318DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats);
1319DEBUGFS_READ_FILE_OPS(fh_reg);
1320DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
1321DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
1322DEBUGFS_READ_FILE_OPS(rxon_flags);
1323DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
1324DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1325
1326/*
1327 * Create the debugfs files and directories
1328 *
1329 */
1330int
1331il_dbgfs_register(struct il_priv *il, const char *name)
1332{
1333 struct dentry *phyd = il->hw->wiphy->debugfsdir;
1334 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1335
1336 dir_drv = debugfs_create_dir(name, phyd);
1337 if (!dir_drv)
1338 return -ENOMEM;
1339
1340 il->debugfs_dir = dir_drv;
1341
1342 dir_data = debugfs_create_dir("data", dir_drv);
1343 if (!dir_data)
1344 goto err;
1345 dir_rf = debugfs_create_dir("rf", dir_drv);
1346 if (!dir_rf)
1347 goto err;
1348 dir_debug = debugfs_create_dir("debug", dir_drv);
1349 if (!dir_debug)
1350 goto err;
1351
1352 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1353 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1354 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1355 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1356 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1357 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1358 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1359 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1360 DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR);
1361 DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR);
1362 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
1363 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1364 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1365 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1366 DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR);
1367 DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR);
1368 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
1369 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
1370 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1371 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1372 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1373 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1374
1375 if (il->cfg->base_params->sensitivity_calib_by_driver)
1376 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1377 if (il->cfg->base_params->chain_noise_calib_by_driver)
1378 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1379 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1380 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1381 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1382 if (il->cfg->base_params->sensitivity_calib_by_driver)
1383 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1384 &il->disable_sens_cal);
1385 if (il->cfg->base_params->chain_noise_calib_by_driver)
1386 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1387 &il->disable_chain_noise_cal);
1388 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
1389 return 0;
1390
1391err:
1392 IL_ERR("Can't create the debugfs directory\n");
1393 il_dbgfs_unregister(il);
1394 return -ENOMEM;
1395}
1396EXPORT_SYMBOL(il_dbgfs_register);
1397
1398/**
1399 * Remove the debugfs files and directories
1400 *
1401 */
1402void
1403il_dbgfs_unregister(struct il_priv *il)
1404{
1405 if (!il->debugfs_dir)
1406 return;
1407
1408 debugfs_remove_recursive(il->debugfs_dir);
1409 il->debugfs_dir = NULL;
1410}
1411EXPORT_SYMBOL(il_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
deleted file mode 100644
index cfabb38793ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
+++ /dev/null
@@ -1,523 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-3945-debugfs.h"
30
31
32static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
33{
34 int p = 0;
35
36 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
37 le32_to_cpu(priv->_3945.statistics.flag));
38 if (le32_to_cpu(priv->_3945.statistics.flag) &
39 UCODE_STATISTICS_CLEAR_MSK)
40 p += scnprintf(buf + p, bufsz - p,
41 "\tStatistics have been cleared\n");
42 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
43 (le32_to_cpu(priv->_3945.statistics.flag) &
44 UCODE_STATISTICS_FREQUENCY_MSK)
45 ? "2.4 GHz" : "5.2 GHz");
46 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
47 (le32_to_cpu(priv->_3945.statistics.flag) &
48 UCODE_STATISTICS_NARROW_BAND_MSK)
49 ? "enabled" : "disabled");
50 return p;
51}
52
53ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
54 char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 struct iwl_priv *priv = file->private_data;
58 int pos = 0;
59 char *buf;
60 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
61 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
62 ssize_t ret;
63 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
64 *max_ofdm;
65 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
66 struct iwl39_statistics_rx_non_phy *general, *accum_general;
67 struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
68
69 if (!iwl_legacy_is_alive(priv))
70 return -EAGAIN;
71
72 buf = kzalloc(bufsz, GFP_KERNEL);
73 if (!buf) {
74 IWL_ERR(priv, "Can not allocate Buffer\n");
75 return -ENOMEM;
76 }
77
78 /*
79 * The statistic information display here is based on
80 * the last statistics notification from uCode
81 * might not reflect the current uCode activity
82 */
83 ofdm = &priv->_3945.statistics.rx.ofdm;
84 cck = &priv->_3945.statistics.rx.cck;
85 general = &priv->_3945.statistics.rx.general;
86 accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
87 accum_cck = &priv->_3945.accum_statistics.rx.cck;
88 accum_general = &priv->_3945.accum_statistics.rx.general;
89 delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
90 delta_cck = &priv->_3945.delta_statistics.rx.cck;
91 delta_general = &priv->_3945.delta_statistics.rx.general;
92 max_ofdm = &priv->_3945.max_delta.rx.ofdm;
93 max_cck = &priv->_3945.max_delta.rx.cck;
94 max_general = &priv->_3945.max_delta.rx.general;
95
96 pos += iwl3945_statistics_flag(priv, buf, bufsz);
97 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
98 "acumulative delta max\n",
99 "Statistics_Rx - OFDM:");
100 pos += scnprintf(buf + pos, bufsz - pos,
101 " %-30s %10u %10u %10u %10u\n",
102 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
103 accum_ofdm->ina_cnt,
104 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 " %-30s %10u %10u %10u %10u\n",
107 "fina_cnt:",
108 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
109 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
110 pos += scnprintf(buf + pos, bufsz - pos,
111 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
112 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
113 delta_ofdm->plcp_err, max_ofdm->plcp_err);
114 pos += scnprintf(buf + pos, bufsz - pos,
115 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
116 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
117 delta_ofdm->crc32_err, max_ofdm->crc32_err);
118 pos += scnprintf(buf + pos, bufsz - pos,
119 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
120 le32_to_cpu(ofdm->overrun_err),
121 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
122 max_ofdm->overrun_err);
123 pos += scnprintf(buf + pos, bufsz - pos,
124 " %-30s %10u %10u %10u %10u\n",
125 "early_overrun_err:",
126 le32_to_cpu(ofdm->early_overrun_err),
127 accum_ofdm->early_overrun_err,
128 delta_ofdm->early_overrun_err,
129 max_ofdm->early_overrun_err);
130 pos += scnprintf(buf + pos, bufsz - pos,
131 " %-30s %10u %10u %10u %10u\n",
132 "crc32_good:", le32_to_cpu(ofdm->crc32_good),
133 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
134 max_ofdm->crc32_good);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
137 le32_to_cpu(ofdm->false_alarm_cnt),
138 accum_ofdm->false_alarm_cnt,
139 delta_ofdm->false_alarm_cnt,
140 max_ofdm->false_alarm_cnt);
141 pos += scnprintf(buf + pos, bufsz - pos,
142 " %-30s %10u %10u %10u %10u\n",
143 "fina_sync_err_cnt:",
144 le32_to_cpu(ofdm->fina_sync_err_cnt),
145 accum_ofdm->fina_sync_err_cnt,
146 delta_ofdm->fina_sync_err_cnt,
147 max_ofdm->fina_sync_err_cnt);
148 pos += scnprintf(buf + pos, bufsz - pos,
149 " %-30s %10u %10u %10u %10u\n",
150 "sfd_timeout:",
151 le32_to_cpu(ofdm->sfd_timeout),
152 accum_ofdm->sfd_timeout,
153 delta_ofdm->sfd_timeout,
154 max_ofdm->sfd_timeout);
155 pos += scnprintf(buf + pos, bufsz - pos,
156 " %-30s %10u %10u %10u %10u\n",
157 "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout),
159 accum_ofdm->fina_timeout,
160 delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 " %-30s %10u %10u %10u %10u\n",
164 "unresponded_rts:",
165 le32_to_cpu(ofdm->unresponded_rts),
166 accum_ofdm->unresponded_rts,
167 delta_ofdm->unresponded_rts,
168 max_ofdm->unresponded_rts);
169 pos += scnprintf(buf + pos, bufsz - pos,
170 " %-30s %10u %10u %10u %10u\n",
171 "rxe_frame_lmt_ovrun:",
172 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
173 accum_ofdm->rxe_frame_limit_overrun,
174 delta_ofdm->rxe_frame_limit_overrun,
175 max_ofdm->rxe_frame_limit_overrun);
176 pos += scnprintf(buf + pos, bufsz - pos,
177 " %-30s %10u %10u %10u %10u\n",
178 "sent_ack_cnt:",
179 le32_to_cpu(ofdm->sent_ack_cnt),
180 accum_ofdm->sent_ack_cnt,
181 delta_ofdm->sent_ack_cnt,
182 max_ofdm->sent_ack_cnt);
183 pos += scnprintf(buf + pos, bufsz - pos,
184 " %-30s %10u %10u %10u %10u\n",
185 "sent_cts_cnt:",
186 le32_to_cpu(ofdm->sent_cts_cnt),
187 accum_ofdm->sent_cts_cnt,
188 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
189
190 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
191 "acumulative delta max\n",
192 "Statistics_Rx - CCK:");
193 pos += scnprintf(buf + pos, bufsz - pos,
194 " %-30s %10u %10u %10u %10u\n",
195 "ina_cnt:",
196 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
197 delta_cck->ina_cnt, max_cck->ina_cnt);
198 pos += scnprintf(buf + pos, bufsz - pos,
199 " %-30s %10u %10u %10u %10u\n",
200 "fina_cnt:",
201 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
202 delta_cck->fina_cnt, max_cck->fina_cnt);
203 pos += scnprintf(buf + pos, bufsz - pos,
204 " %-30s %10u %10u %10u %10u\n",
205 "plcp_err:",
206 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
207 delta_cck->plcp_err, max_cck->plcp_err);
208 pos += scnprintf(buf + pos, bufsz - pos,
209 " %-30s %10u %10u %10u %10u\n",
210 "crc32_err:",
211 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
212 delta_cck->crc32_err, max_cck->crc32_err);
213 pos += scnprintf(buf + pos, bufsz - pos,
214 " %-30s %10u %10u %10u %10u\n",
215 "overrun_err:",
216 le32_to_cpu(cck->overrun_err),
217 accum_cck->overrun_err,
218 delta_cck->overrun_err, max_cck->overrun_err);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 " %-30s %10u %10u %10u %10u\n",
221 "early_overrun_err:",
222 le32_to_cpu(cck->early_overrun_err),
223 accum_cck->early_overrun_err,
224 delta_cck->early_overrun_err,
225 max_cck->early_overrun_err);
226 pos += scnprintf(buf + pos, bufsz - pos,
227 " %-30s %10u %10u %10u %10u\n",
228 "crc32_good:",
229 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
230 delta_cck->crc32_good,
231 max_cck->crc32_good);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 " %-30s %10u %10u %10u %10u\n",
234 "false_alarm_cnt:",
235 le32_to_cpu(cck->false_alarm_cnt),
236 accum_cck->false_alarm_cnt,
237 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 " %-30s %10u %10u %10u %10u\n",
240 "fina_sync_err_cnt:",
241 le32_to_cpu(cck->fina_sync_err_cnt),
242 accum_cck->fina_sync_err_cnt,
243 delta_cck->fina_sync_err_cnt,
244 max_cck->fina_sync_err_cnt);
245 pos += scnprintf(buf + pos, bufsz - pos,
246 " %-30s %10u %10u %10u %10u\n",
247 "sfd_timeout:",
248 le32_to_cpu(cck->sfd_timeout),
249 accum_cck->sfd_timeout,
250 delta_cck->sfd_timeout, max_cck->sfd_timeout);
251 pos += scnprintf(buf + pos, bufsz - pos,
252 " %-30s %10u %10u %10u %10u\n",
253 "fina_timeout:",
254 le32_to_cpu(cck->fina_timeout),
255 accum_cck->fina_timeout,
256 delta_cck->fina_timeout, max_cck->fina_timeout);
257 pos += scnprintf(buf + pos, bufsz - pos,
258 " %-30s %10u %10u %10u %10u\n",
259 "unresponded_rts:",
260 le32_to_cpu(cck->unresponded_rts),
261 accum_cck->unresponded_rts,
262 delta_cck->unresponded_rts,
263 max_cck->unresponded_rts);
264 pos += scnprintf(buf + pos, bufsz - pos,
265 " %-30s %10u %10u %10u %10u\n",
266 "rxe_frame_lmt_ovrun:",
267 le32_to_cpu(cck->rxe_frame_limit_overrun),
268 accum_cck->rxe_frame_limit_overrun,
269 delta_cck->rxe_frame_limit_overrun,
270 max_cck->rxe_frame_limit_overrun);
271 pos += scnprintf(buf + pos, bufsz - pos,
272 " %-30s %10u %10u %10u %10u\n",
273 "sent_ack_cnt:",
274 le32_to_cpu(cck->sent_ack_cnt),
275 accum_cck->sent_ack_cnt,
276 delta_cck->sent_ack_cnt,
277 max_cck->sent_ack_cnt);
278 pos += scnprintf(buf + pos, bufsz - pos,
279 " %-30s %10u %10u %10u %10u\n",
280 "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt,
283 delta_cck->sent_cts_cnt,
284 max_cck->sent_cts_cnt);
285
286 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
287 "acumulative delta max\n",
288 "Statistics_Rx - GENERAL:");
289 pos += scnprintf(buf + pos, bufsz - pos,
290 " %-30s %10u %10u %10u %10u\n",
291 "bogus_cts:",
292 le32_to_cpu(general->bogus_cts),
293 accum_general->bogus_cts,
294 delta_general->bogus_cts, max_general->bogus_cts);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 " %-30s %10u %10u %10u %10u\n",
297 "bogus_ack:",
298 le32_to_cpu(general->bogus_ack),
299 accum_general->bogus_ack,
300 delta_general->bogus_ack, max_general->bogus_ack);
301 pos += scnprintf(buf + pos, bufsz - pos,
302 " %-30s %10u %10u %10u %10u\n",
303 "non_bssid_frames:",
304 le32_to_cpu(general->non_bssid_frames),
305 accum_general->non_bssid_frames,
306 delta_general->non_bssid_frames,
307 max_general->non_bssid_frames);
308 pos += scnprintf(buf + pos, bufsz - pos,
309 " %-30s %10u %10u %10u %10u\n",
310 "filtered_frames:",
311 le32_to_cpu(general->filtered_frames),
312 accum_general->filtered_frames,
313 delta_general->filtered_frames,
314 max_general->filtered_frames);
315 pos += scnprintf(buf + pos, bufsz - pos,
316 " %-30s %10u %10u %10u %10u\n",
317 "non_channel_beacons:",
318 le32_to_cpu(general->non_channel_beacons),
319 accum_general->non_channel_beacons,
320 delta_general->non_channel_beacons,
321 max_general->non_channel_beacons);
322
323 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
324 kfree(buf);
325 return ret;
326}
327
328ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
329 char __user *user_buf,
330 size_t count, loff_t *ppos)
331{
332 struct iwl_priv *priv = file->private_data;
333 int pos = 0;
334 char *buf;
335 int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
336 ssize_t ret;
337 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
338
339 if (!iwl_legacy_is_alive(priv))
340 return -EAGAIN;
341
342 buf = kzalloc(bufsz, GFP_KERNEL);
343 if (!buf) {
344 IWL_ERR(priv, "Can not allocate Buffer\n");
345 return -ENOMEM;
346 }
347
348 /*
349 * The statistic information display here is based on
350 * the last statistics notification from uCode
351 * might not reflect the current uCode activity
352 */
353 tx = &priv->_3945.statistics.tx;
354 accum_tx = &priv->_3945.accum_statistics.tx;
355 delta_tx = &priv->_3945.delta_statistics.tx;
356 max_tx = &priv->_3945.max_delta.tx;
357 pos += iwl3945_statistics_flag(priv, buf, bufsz);
358 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
359 "acumulative delta max\n",
360 "Statistics_Tx:");
361 pos += scnprintf(buf + pos, bufsz - pos,
362 " %-30s %10u %10u %10u %10u\n",
363 "preamble:",
364 le32_to_cpu(tx->preamble_cnt),
365 accum_tx->preamble_cnt,
366 delta_tx->preamble_cnt, max_tx->preamble_cnt);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 " %-30s %10u %10u %10u %10u\n",
369 "rx_detected_cnt:",
370 le32_to_cpu(tx->rx_detected_cnt),
371 accum_tx->rx_detected_cnt,
372 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 " %-30s %10u %10u %10u %10u\n",
375 "bt_prio_defer_cnt:",
376 le32_to_cpu(tx->bt_prio_defer_cnt),
377 accum_tx->bt_prio_defer_cnt,
378 delta_tx->bt_prio_defer_cnt,
379 max_tx->bt_prio_defer_cnt);
380 pos += scnprintf(buf + pos, bufsz - pos,
381 " %-30s %10u %10u %10u %10u\n",
382 "bt_prio_kill_cnt:",
383 le32_to_cpu(tx->bt_prio_kill_cnt),
384 accum_tx->bt_prio_kill_cnt,
385 delta_tx->bt_prio_kill_cnt,
386 max_tx->bt_prio_kill_cnt);
387 pos += scnprintf(buf + pos, bufsz - pos,
388 " %-30s %10u %10u %10u %10u\n",
389 "few_bytes_cnt:",
390 le32_to_cpu(tx->few_bytes_cnt),
391 accum_tx->few_bytes_cnt,
392 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
393 pos += scnprintf(buf + pos, bufsz - pos,
394 " %-30s %10u %10u %10u %10u\n",
395 "cts_timeout:",
396 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
397 delta_tx->cts_timeout, max_tx->cts_timeout);
398 pos += scnprintf(buf + pos, bufsz - pos,
399 " %-30s %10u %10u %10u %10u\n",
400 "ack_timeout:",
401 le32_to_cpu(tx->ack_timeout),
402 accum_tx->ack_timeout,
403 delta_tx->ack_timeout, max_tx->ack_timeout);
404 pos += scnprintf(buf + pos, bufsz - pos,
405 " %-30s %10u %10u %10u %10u\n",
406 "expected_ack_cnt:",
407 le32_to_cpu(tx->expected_ack_cnt),
408 accum_tx->expected_ack_cnt,
409 delta_tx->expected_ack_cnt,
410 max_tx->expected_ack_cnt);
411 pos += scnprintf(buf + pos, bufsz - pos,
412 " %-30s %10u %10u %10u %10u\n",
413 "actual_ack_cnt:",
414 le32_to_cpu(tx->actual_ack_cnt),
415 accum_tx->actual_ack_cnt,
416 delta_tx->actual_ack_cnt,
417 max_tx->actual_ack_cnt);
418
419 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
420 kfree(buf);
421 return ret;
422}
423
424ssize_t iwl3945_ucode_general_stats_read(struct file *file,
425 char __user *user_buf,
426 size_t count, loff_t *ppos)
427{
428 struct iwl_priv *priv = file->private_data;
429 int pos = 0;
430 char *buf;
431 int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
432 ssize_t ret;
433 struct iwl39_statistics_general *general, *accum_general;
434 struct iwl39_statistics_general *delta_general, *max_general;
435 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
436 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
437
438 if (!iwl_legacy_is_alive(priv))
439 return -EAGAIN;
440
441 buf = kzalloc(bufsz, GFP_KERNEL);
442 if (!buf) {
443 IWL_ERR(priv, "Can not allocate Buffer\n");
444 return -ENOMEM;
445 }
446
447 /*
448 * The statistic information display here is based on
449 * the last statistics notification from uCode
450 * might not reflect the current uCode activity
451 */
452 general = &priv->_3945.statistics.general;
453 dbg = &priv->_3945.statistics.general.dbg;
454 div = &priv->_3945.statistics.general.div;
455 accum_general = &priv->_3945.accum_statistics.general;
456 delta_general = &priv->_3945.delta_statistics.general;
457 max_general = &priv->_3945.max_delta.general;
458 accum_dbg = &priv->_3945.accum_statistics.general.dbg;
459 delta_dbg = &priv->_3945.delta_statistics.general.dbg;
460 max_dbg = &priv->_3945.max_delta.general.dbg;
461 accum_div = &priv->_3945.accum_statistics.general.div;
462 delta_div = &priv->_3945.delta_statistics.general.div;
463 max_div = &priv->_3945.max_delta.general.div;
464 pos += iwl3945_statistics_flag(priv, buf, bufsz);
465 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
466 "acumulative delta max\n",
467 "Statistics_General:");
468 pos += scnprintf(buf + pos, bufsz - pos,
469 " %-30s %10u %10u %10u %10u\n",
470 "burst_check:",
471 le32_to_cpu(dbg->burst_check),
472 accum_dbg->burst_check,
473 delta_dbg->burst_check, max_dbg->burst_check);
474 pos += scnprintf(buf + pos, bufsz - pos,
475 " %-30s %10u %10u %10u %10u\n",
476 "burst_count:",
477 le32_to_cpu(dbg->burst_count),
478 accum_dbg->burst_count,
479 delta_dbg->burst_count, max_dbg->burst_count);
480 pos += scnprintf(buf + pos, bufsz - pos,
481 " %-30s %10u %10u %10u %10u\n",
482 "sleep_time:",
483 le32_to_cpu(general->sleep_time),
484 accum_general->sleep_time,
485 delta_general->sleep_time, max_general->sleep_time);
486 pos += scnprintf(buf + pos, bufsz - pos,
487 " %-30s %10u %10u %10u %10u\n",
488 "slots_out:",
489 le32_to_cpu(general->slots_out),
490 accum_general->slots_out,
491 delta_general->slots_out, max_general->slots_out);
492 pos += scnprintf(buf + pos, bufsz - pos,
493 " %-30s %10u %10u %10u %10u\n",
494 "slots_idle:",
495 le32_to_cpu(general->slots_idle),
496 accum_general->slots_idle,
497 delta_general->slots_idle, max_general->slots_idle);
498 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
499 le32_to_cpu(general->ttl_timestamp));
500 pos += scnprintf(buf + pos, bufsz - pos,
501 " %-30s %10u %10u %10u %10u\n",
502 "tx_on_a:",
503 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
504 delta_div->tx_on_a, max_div->tx_on_a);
505 pos += scnprintf(buf + pos, bufsz - pos,
506 " %-30s %10u %10u %10u %10u\n",
507 "tx_on_b:",
508 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
509 delta_div->tx_on_b, max_div->tx_on_b);
510 pos += scnprintf(buf + pos, bufsz - pos,
511 " %-30s %10u %10u %10u %10u\n",
512 "exec_time:",
513 le32_to_cpu(div->exec_time), accum_div->exec_time,
514 delta_div->exec_time, max_div->exec_time);
515 pos += scnprintf(buf + pos, bufsz - pos,
516 " %-30s %10u %10u %10u %10u\n",
517 "probe_time:",
518 le32_to_cpu(div->probe_time), accum_div->probe_time,
519 delta_div->probe_time, max_div->probe_time);
520 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
521 kfree(buf);
522 return ret;
523}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
deleted file mode 100644
index 8fef4b32b447..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl3945_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count,
40 loff_t *ppos);
41#else
42static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
43 char __user *user_buf, size_t count,
44 loff_t *ppos)
45{
46 return 0;
47}
48static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
49 char __user *user_buf, size_t count,
50 loff_t *ppos)
51{
52 return 0;
53}
54static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
55 char __user *user_buf,
56 size_t count, loff_t *ppos)
57{
58 return 0;
59}
60#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
deleted file mode 100644
index 836c9919f82e..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
+++ /dev/null
@@ -1,187 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_3945_fh_h__
64#define __iwl_3945_fh_h__
65
66/************************************/
67/* iwl3945 Flow Handler Definitions */
68/************************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH39_MEM_LOWER_BOUND (0x0800)
75#define FH39_MEM_UPPER_BOUND (0x1000)
76
77#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140)
78#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180)
79#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400)
80#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0)
81#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500)
82#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680)
83
84/* TFDB (Transmit Frame Buffer Descriptor) */
85#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \
86 ((_ch) * 2 + (buf)) * 0x28)
87#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch))
88
89/* CBCC channel is [0,2] */
90#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8)
91#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
92#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
93
94/* RCSR channel is [0,2] */
95#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40)
96#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
97#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
98#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
99#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
100
101#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
102
103/* RSSR */
104#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000)
105#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004)
106
107/* TCSR */
108#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20)
109#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
110#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
111#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
112
113/* TSSR */
114#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000)
115#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008)
116#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010)
117
118
119/* DBM */
120
121#define FH39_SRVC_CHNL (6)
122
123#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
124#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
125
126#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
127
128#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
129
130#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
131
132#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
133
134#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
135
136#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
137
138#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
139#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
140
141#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
142#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
143
144#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
145
146#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
147
148#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
149#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
150
151#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
152
153#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
154
155#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
156#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
157
158#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
159
160#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
161#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
162
163#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
164#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
165
166#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
167#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
168
169#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
170 (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
171 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
172
173#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
174
175struct iwl3945_tfd_tb {
176 __le32 addr;
177 __le32 len;
178} __packed;
179
180struct iwl3945_tfd {
181 __le32 control_flags;
182 struct iwl3945_tfd_tb tbs[4];
183 u8 __pad[28];
184} __packed;
185
186
187#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
deleted file mode 100644
index 5c3a68d3af12..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ /dev/null
@@ -1,291 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
65 * Please use iwl-commands.h for uCode API definitions.
66 * Please use iwl-3945.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_3945_hw__
70#define __iwl_3945_hw__
71
72#include "iwl-eeprom.h"
73
74/* RSSI to dBm */
75#define IWL39_RSSI_OFFSET 95
76
77/*
78 * EEPROM related constants, enums, and structures.
79 */
80#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
81
82/*
83 * Mapping of a Tx power level, at factory calibration temperature,
84 * to a radio/DSP gain table index.
85 * One for each of 5 "sample" power levels in each band.
86 * v_det is measured at the factory, using the 3945's built-in power amplifier
87 * (PA) output voltage detector. This same detector is used during Tx of
88 * long packets in normal operation to provide feedback as to proper output
89 * level.
90 * Data copied from EEPROM.
91 * DO NOT ALTER THIS STRUCTURE!!!
92 */
93struct iwl3945_eeprom_txpower_sample {
94 u8 gain_index; /* index into power (gain) setup table ... */
95 s8 power; /* ... for this pwr level for this chnl group */
96 u16 v_det; /* PA output voltage */
97} __packed;
98
99/*
100 * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
101 * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
102 * Tx power setup code interpolates between the 5 "sample" power levels
103 * to determine the nominal setup for a requested power level.
104 * Data copied from EEPROM.
105 * DO NOT ALTER THIS STRUCTURE!!!
106 */
107struct iwl3945_eeprom_txpower_group {
108 struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
109 s32 a, b, c, d, e; /* coefficients for voltage->power
110 * formula (signed) */
111 s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
112 * frequency (signed) */
113 s8 saturation_power; /* highest power possible by h/w in this
114 * band */
115 u8 group_channel; /* "representative" channel # in this band */
116 s16 temperature; /* h/w temperature at factory calib this band
117 * (signed) */
118} __packed;
119
120/*
121 * Temperature-based Tx-power compensation data, not band-specific.
122 * These coefficients are use to modify a/b/c/d/e coeffs based on
123 * difference between current temperature and factory calib temperature.
124 * Data copied from EEPROM.
125 */
126struct iwl3945_eeprom_temperature_corr {
127 u32 Ta;
128 u32 Tb;
129 u32 Tc;
130 u32 Td;
131 u32 Te;
132} __packed;
133
134/*
135 * EEPROM map
136 */
137struct iwl3945_eeprom {
138 u8 reserved0[16];
139 u16 device_id; /* abs.ofs: 16 */
140 u8 reserved1[2];
141 u16 pmc; /* abs.ofs: 20 */
142 u8 reserved2[20];
143 u8 mac_address[6]; /* abs.ofs: 42 */
144 u8 reserved3[58];
145 u16 board_revision; /* abs.ofs: 106 */
146 u8 reserved4[11];
147 u8 board_pba_number[9]; /* abs.ofs: 119 */
148 u8 reserved5[8];
149 u16 version; /* abs.ofs: 136 */
150 u8 sku_cap; /* abs.ofs: 138 */
151 u8 leds_mode; /* abs.ofs: 139 */
152 u16 oem_mode;
153 u16 wowlan_mode; /* abs.ofs: 142 */
154 u16 leds_time_interval; /* abs.ofs: 144 */
155 u8 leds_off_time; /* abs.ofs: 146 */
156 u8 leds_on_time; /* abs.ofs: 147 */
157 u8 almgor_m_version; /* abs.ofs: 148 */
158 u8 antenna_switch_type; /* abs.ofs: 149 */
159 u8 reserved6[42];
160 u8 sku_id[4]; /* abs.ofs: 192 */
161
162/*
163 * Per-channel regulatory data.
164 *
165 * Each channel that *might* be supported by 3945 has a fixed location
166 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
167 * txpower (MSB).
168 *
169 * Entries immediately below are for 20 MHz channel width.
170 *
171 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
172 */
173 u16 band_1_count; /* abs.ofs: 196 */
174 struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
175
176/*
177 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
178 * 5.0 GHz channels 7, 8, 11, 12, 16
179 * (4915-5080MHz) (none of these is ever supported)
180 */
181 u16 band_2_count; /* abs.ofs: 226 */
182 struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
183
184/*
185 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
186 * (5170-5320MHz)
187 */
188 u16 band_3_count; /* abs.ofs: 254 */
189 struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
190
191/*
192 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
193 * (5500-5700MHz)
194 */
195 u16 band_4_count; /* abs.ofs: 280 */
196 struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
197
198/*
199 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
200 * (5725-5825MHz)
201 */
202 u16 band_5_count; /* abs.ofs: 304 */
203 struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
204
205 u8 reserved9[194];
206
207/*
208 * 3945 Txpower calibration data.
209 */
210#define IWL_NUM_TX_CALIB_GROUPS 5
211 struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
212/* abs.ofs: 512 */
213 struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
214 u8 reserved16[172]; /* fill out to full 1024 byte block */
215} __packed;
216
217#define IWL3945_EEPROM_IMG_SIZE 1024
218
219/* End of EEPROM */
220
221#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
222#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
223
224/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
225#define IWL39_NUM_QUEUES 5
226#define IWL39_CMD_QUEUE_NUM 4
227
228#define IWL_DEFAULT_TX_RETRY 15
229
230/*********************************************/
231
232#define RFD_SIZE 4
233#define NUM_TFD_CHUNKS 4
234
235#define RX_QUEUE_SIZE 256
236#define RX_QUEUE_MASK 255
237#define RX_QUEUE_SIZE_LOG 8
238
239#define U32_PAD(n) ((4-(n))&0x3)
240
241#define TFD_CTL_COUNT_SET(n) (n << 24)
242#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
243#define TFD_CTL_PAD_SET(n) (n << 28)
244#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
245
246/* Sizes and addresses for instruction and data memory (SRAM) in
247 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
248#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
249#define IWL39_RTC_INST_UPPER_BOUND (0x014000)
250
251#define IWL39_RTC_DATA_LOWER_BOUND (0x800000)
252#define IWL39_RTC_DATA_UPPER_BOUND (0x808000)
253
254#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
255 IWL39_RTC_INST_LOWER_BOUND)
256#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
257 IWL39_RTC_DATA_LOWER_BOUND)
258
259#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
260#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
261
262/* Size of uCode instruction memory in bootstrap state machine */
263#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
264
265static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
266{
267 return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
268 (addr < IWL39_RTC_DATA_UPPER_BOUND);
269}
270
271/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
272 * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
273struct iwl3945_shared {
274 __le32 tx_base_ptr[8];
275} __packed;
276
277static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
278{
279 return le16_to_cpu(rate_n_flags) & 0xFF;
280}
281
282static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
283{
284 return le16_to_cpu(rate_n_flags);
285}
286
287static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
288{
289 return cpu_to_le16((u16)rate|flags);
290}
291#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
deleted file mode 100644
index 7a7f0f38c8ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <net/mac80211.h>
36#include <linux/etherdevice.h>
37#include <asm/unaligned.h>
38
39#include "iwl-commands.h"
40#include "iwl-3945.h"
41#include "iwl-core.h"
42#include "iwl-dev.h"
43#include "iwl-3945-led.h"
44
45
46/* Send led command */
47static int iwl3945_send_led_cmd(struct iwl_priv *priv,
48 struct iwl_led_cmd *led_cmd)
49{
50 struct iwl_host_cmd cmd = {
51 .id = REPLY_LEDS_CMD,
52 .len = sizeof(struct iwl_led_cmd),
53 .data = led_cmd,
54 .flags = CMD_ASYNC,
55 .callback = NULL,
56 };
57
58 return iwl_legacy_send_cmd(priv, &cmd);
59}
60
61const struct iwl_led_ops iwl3945_led_ops = {
62 .cmd = iwl3945_send_led_cmd,
63};
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
deleted file mode 100644
index 96716276eb0d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_3945_led_h__
28#define __iwl_3945_led_h__
29
30extern const struct iwl_led_ops iwl3945_led_ops;
31
32#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
deleted file mode 100644
index 8faeaf2dddec..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ /dev/null
@@ -1,996 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/skbuff.h>
30#include <linux/slab.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "iwl-commands.h"
40#include "iwl-3945.h"
41#include "iwl-sta.h"
42
43#define RS_NAME "iwl-3945-rs"
44
45static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
46 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
47};
48
49static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
50 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
51};
52
53static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
54 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
55};
56
57static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
58 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
59};
60
61struct iwl3945_tpt_entry {
62 s8 min_rssi;
63 u8 index;
64};
65
66static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
67 {-60, IWL_RATE_54M_INDEX},
68 {-64, IWL_RATE_48M_INDEX},
69 {-72, IWL_RATE_36M_INDEX},
70 {-80, IWL_RATE_24M_INDEX},
71 {-84, IWL_RATE_18M_INDEX},
72 {-85, IWL_RATE_12M_INDEX},
73 {-87, IWL_RATE_9M_INDEX},
74 {-89, IWL_RATE_6M_INDEX}
75};
76
77static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
78 {-60, IWL_RATE_54M_INDEX},
79 {-64, IWL_RATE_48M_INDEX},
80 {-68, IWL_RATE_36M_INDEX},
81 {-80, IWL_RATE_24M_INDEX},
82 {-84, IWL_RATE_18M_INDEX},
83 {-85, IWL_RATE_12M_INDEX},
84 {-86, IWL_RATE_11M_INDEX},
85 {-88, IWL_RATE_5M_INDEX},
86 {-90, IWL_RATE_2M_INDEX},
87 {-92, IWL_RATE_1M_INDEX}
88};
89
90#define IWL_RATE_MAX_WINDOW 62
91#define IWL_RATE_FLUSH (3*HZ)
92#define IWL_RATE_WIN_FLUSH (HZ/2)
93#define IWL39_RATE_HIGH_TH 11520
94#define IWL_SUCCESS_UP_TH 8960
95#define IWL_SUCCESS_DOWN_TH 10880
96#define IWL_RATE_MIN_FAILURE_TH 6
97#define IWL_RATE_MIN_SUCCESS_TH 8
98#define IWL_RATE_DECREASE_TH 1920
99#define IWL_RATE_RETRY_TH 15
100
101static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
102{
103 u32 index = 0;
104 u32 table_size = 0;
105 struct iwl3945_tpt_entry *tpt_table = NULL;
106
107 if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
108 rssi = IWL_MIN_RSSI_VAL;
109
110 switch (band) {
111 case IEEE80211_BAND_2GHZ:
112 tpt_table = iwl3945_tpt_table_g;
113 table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
114 break;
115
116 case IEEE80211_BAND_5GHZ:
117 tpt_table = iwl3945_tpt_table_a;
118 table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
119 break;
120
121 default:
122 BUG();
123 break;
124 }
125
126 while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
127 index++;
128
129 index = min(index, (table_size - 1));
130
131 return tpt_table[index].index;
132}
133
134static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
135{
136 window->data = 0;
137 window->success_counter = 0;
138 window->success_ratio = -1;
139 window->counter = 0;
140 window->average_tpt = IWL_INVALID_VALUE;
141 window->stamp = 0;
142}
143
144/**
145 * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
146 *
147 * Returns the number of windows that have gathered data but were
148 * not flushed. If there were any that were not flushed, then
149 * reschedule the rate flushing routine.
150 */
151static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
152{
153 int unflushed = 0;
154 int i;
155 unsigned long flags;
156 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
157
158 /*
159 * For each rate, if we have collected data on that rate
160 * and it has been more than IWL_RATE_WIN_FLUSH
161 * since we flushed, clear out the gathered statistics
162 */
163 for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
164 if (!rs_sta->win[i].counter)
165 continue;
166
167 spin_lock_irqsave(&rs_sta->lock, flags);
168 if (time_after(jiffies, rs_sta->win[i].stamp +
169 IWL_RATE_WIN_FLUSH)) {
170 IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
171 "index %d\n",
172 rs_sta->win[i].counter, i);
173 iwl3945_clear_window(&rs_sta->win[i]);
174 } else
175 unflushed++;
176 spin_unlock_irqrestore(&rs_sta->lock, flags);
177 }
178
179 return unflushed;
180}
181
/* Bounds and target for the adaptive flush interval computed below. */
#define IWL_RATE_FLUSH_MAX		5000	/* msec */
#define IWL_RATE_FLUSH_MIN		50	/* msec */
#define IWL_AVERAGE_PACKETS		1500

/*
 * Timer callback (rs_sta->rate_scale_flush): age out stale per-rate history
 * windows and retune the flush interval to the station's traffic level.
 *
 * 'data' is the iwl3945_rs_sta pointer cast to unsigned long (the classic
 * pre-timer_setup() kernel timer calling convention).
 */
static void iwl3945_bg_rate_scale_flush(unsigned long data)
{
	struct iwl3945_rs_sta *rs_sta = (void *)data;
	struct iwl_priv *priv __maybe_unused = rs_sta->priv;
	int unflushed = 0;
	unsigned long flags;
	u32 packet_count, duration, pps;

	IWL_DEBUG_RATE(priv, "enter\n");

	/* Clear any windows that have gone stale; returns how many remain. */
	unflushed = iwl3945_rate_scale_flush_windows(rs_sta);

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* Number of packets Rx'd since last time this timer ran */
	packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;

	rs_sta->last_tx_packets = rs_sta->tx_packets + 1;

	if (unflushed) {
		duration =
		    jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);

		IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
			       packet_count, duration);

		/* Determine packets per second */
		if (duration)
			pps = (packet_count * 1000) / duration;
		else
			pps = 0;

		/* Pick a period that covers ~IWL_AVERAGE_PACKETS frames,
		 * clamped to [IWL_RATE_FLUSH_MIN, IWL_RATE_FLUSH_MAX] msec. */
		if (pps) {
			duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
			if (duration < IWL_RATE_FLUSH_MIN)
				duration = IWL_RATE_FLUSH_MIN;
			else if (duration > IWL_RATE_FLUSH_MAX)
				duration = IWL_RATE_FLUSH_MAX;
		} else
			duration = IWL_RATE_FLUSH_MAX;

		rs_sta->flush_time = msecs_to_jiffies(duration);

		IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
			       duration, packet_count);

		/* Re-arm ourselves; windows are still dirty. */
		mod_timer(&rs_sta->rate_scale_flush, jiffies +
			  rs_sta->flush_time);

		rs_sta->last_partial_flush = jiffies;
	} else {
		rs_sta->flush_time = IWL_RATE_FLUSH;
		rs_sta->flush_pending = 0;
	}
	/* If there weren't any unflushed entries, we don't schedule the timer
	 * to run again */

	rs_sta->last_flush = jiffies;

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	IWL_DEBUG_RATE(priv, "leave\n");
}
249
/**
 * iwl3945_collect_tx_data - Update the success/failure sliding window
 *
 * We keep a sliding window of the last IWL_RATE_MAX_WINDOW packets
 * transmitted at this rate. window->data contains the bitmask of
 * successful packets.
 */
static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
				    struct iwl3945_rate_scale_data *window,
				    int success, int retries, int index)
{
	unsigned long flags;
	s32 fail_count;
	struct iwl_priv *priv __maybe_unused = rs_sta->priv;

	/* Each call must account for at least one attempted transmission. */
	if (!retries) {
		IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
		return;
	}

	spin_lock_irqsave(&rs_sta->lock, flags);

	/*
	 * Keep track of only the latest 62 tx frame attempts in this rate's
	 * history window; anything older isn't really relevant any more.
	 * If we have filled up the sliding window, drop the oldest attempt;
	 * if the oldest attempt (highest bit in bitmap) shows "success",
	 * subtract "1" from the success counter (this is the main reason
	 * we keep these bitmaps!).
	 * */
	while (retries > 0) {
		if (window->counter >= IWL_RATE_MAX_WINDOW) {

			/* remove earliest */
			window->counter = IWL_RATE_MAX_WINDOW - 1;

			if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
				window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
				window->success_counter--;
			}
		}

		/* Increment frames-attempted counter */
		window->counter++;

		/* Shift bitmap by one frame (throw away oldest history),
		 * OR in "1", and increment "success" if this
		 * frame was successful. */
		window->data <<= 1;
		if (success > 0) {
			window->success_counter++;
			window->data |= 0x1;
			success--;
		}

		retries--;
	}

	/* Calculate current success ratio, avoid divide-by-0! */
	if (window->counter > 0)
		window->success_ratio = 128 * (100 * window->success_counter)
					/ window->counter;
	else
		window->success_ratio = IWL_INVALID_VALUE;

	fail_count = window->counter - window->success_counter;

	/* Calculate average throughput, if we have enough history. */
	if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
	    (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
		window->average_tpt = ((window->success_ratio *
				rs_sta->expected_tpt[index] + 64) / 128);
	else
		window->average_tpt = IWL_INVALID_VALUE;

	/* Tag this window as having been updated */
	window->stamp = jiffies;

	spin_unlock_irqrestore(&rs_sta->lock, flags);

}
331
/*
 * Called after adding a new station to initialize rate scaling
 *
 * Resets the per-station rate-scale state (history windows, flush timer
 * bookkeeping, expected-throughput table) and seeds last_txrate_idx with
 * the highest rate the station supports on the current band.
 */
void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hw *hw = priv->hw;
	struct ieee80211_conf *conf = &priv->hw->conf;
	struct iwl3945_sta_priv *psta;
	struct iwl3945_rs_sta *rs_sta;
	struct ieee80211_supported_band *sband;
	int i;

	IWL_DEBUG_INFO(priv, "enter\n");
	/* The broadcast station does no rate scaling; just clear the
	 * in-progress flag at "out" below. */
	if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
		goto out;

	psta = (struct iwl3945_sta_priv *) sta->drv_priv;
	rs_sta = &psta->rs_sta;
	sband = hw->wiphy->bands[conf->channel->band];

	rs_sta->priv = priv;

	rs_sta->start_rate = IWL_RATE_INVALID;

	/* default to just 802.11b */
	rs_sta->expected_tpt = iwl3945_expected_tpt_b;

	rs_sta->last_partial_flush = jiffies;
	rs_sta->last_flush = jiffies;
	rs_sta->flush_time = IWL_RATE_FLUSH;
	rs_sta->last_tx_packets = 0;

	/* Timer itself was init_timer()'d in iwl3945_rs_alloc_sta(); here we
	 * (re)bind the callback and its argument. */
	rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
	rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;

	for (i = 0; i < IWL_RATE_COUNT_3945; i++)
		iwl3945_clear_window(&rs_sta->win[i]);

	/* TODO: what is a good starting rate for STA? About middle? Maybe not
	 * the lowest or the highest rate.. Could consider using RSSI from
	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
	 * after assoc.. */

	/* Start from the highest supported rate (scan downward). */
	for (i = sband->n_bitrates - 1; i >= 0; i--) {
		if (sta->supp_rates[sband->band] & (1 << i)) {
			rs_sta->last_txrate_idx = i;
			break;
		}
	}

	priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
	/* For the 5 GHz band, rates start at IWL_FIRST_OFDM_RATE */
	if (sband->band == IEEE80211_BAND_5GHZ) {
		rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
		priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
						IWL_FIRST_OFDM_RATE;
	}

out:
	priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;

	IWL_DEBUG_INFO(priv, "leave\n");
}
395
396static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
397{
398 return hw->priv;
399}
400
/*
 * mac80211 requires rate-control modules to implement a free hook; there
 * is nothing to release here because iwl3945_rs_alloc() handed back the
 * driver's own iwl_priv, whose lifetime is managed elsewhere.
 */
static void iwl3945_rs_free(void *priv)
{
}
406
/*
 * Per-station rate-control allocation hook.  The state lives inside
 * sta->drv_priv (no separate allocation); only the lock and the flush
 * timer are initialized here — the rest is set up later by
 * iwl3945_rs_rate_init() once the station is fully added.
 */
static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
{
	struct iwl3945_rs_sta *rs_sta;
	struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
	struct iwl_priv *priv __maybe_unused = iwl_priv;

	IWL_DEBUG_RATE(priv, "enter\n");

	rs_sta = &psta->rs_sta;

	spin_lock_init(&rs_sta->lock);
	init_timer(&rs_sta->rate_scale_flush);

	IWL_DEBUG_RATE(priv, "leave\n");

	return rs_sta;
}
424
/*
 * Per-station rate-control teardown hook: stop the flush timer, waiting
 * for a concurrently-running callback to finish.
 */
static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
			 void *priv_sta)
{
	struct iwl3945_rs_sta *rs_sta = priv_sta;

	/*
	 * Be careful not to use any members of iwl3945_rs_sta (like trying
	 * to use iwl_priv to print out debugging) since it may not be fully
	 * initialized at this point.
	 */
	del_timer_sync(&rs_sta->rate_scale_flush);
}
437
438
439/**
440 * iwl3945_rs_tx_status - Update rate control values based on Tx results
441 *
442 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
443 * the hardware for each rate.
444 */
445static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
446 struct ieee80211_sta *sta, void *priv_sta,
447 struct sk_buff *skb)
448{
449 s8 retries = 0, current_count;
450 int scale_rate_index, first_index, last_index;
451 unsigned long flags;
452 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
453 struct iwl3945_rs_sta *rs_sta = priv_sta;
454 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
455
456 IWL_DEBUG_RATE(priv, "enter\n");
457
458 retries = info->status.rates[0].count;
459 /* Sanity Check for retries */
460 if (retries > IWL_RATE_RETRY_TH)
461 retries = IWL_RATE_RETRY_TH;
462
463 first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
464 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
465 IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
466 return;
467 }
468
469 if (!priv_sta) {
470 IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
471 return;
472 }
473
474 /* Treat uninitialized rate scaling data same as non-existing. */
475 if (!rs_sta->priv) {
476 IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
477 return;
478 }
479
480
481 rs_sta->tx_packets++;
482
483 scale_rate_index = first_index;
484 last_index = first_index;
485
486 /*
487 * Update the window for each rate. We determine which rates
488 * were Tx'd based on the total number of retries vs. the number
489 * of retries configured for each rate -- currently set to the
490 * priv value 'retry_rate' vs. rate specific
491 *
492 * On exit from this while loop last_index indicates the rate
493 * at which the frame was finally transmitted (or failed if no
494 * ACK)
495 */
496 while (retries > 1) {
497 if ((retries - 1) < priv->retry_rate) {
498 current_count = (retries - 1);
499 last_index = scale_rate_index;
500 } else {
501 current_count = priv->retry_rate;
502 last_index = iwl3945_rs_next_rate(priv,
503 scale_rate_index);
504 }
505
506 /* Update this rate accounting for as many retries
507 * as was used for it (per current_count) */
508 iwl3945_collect_tx_data(rs_sta,
509 &rs_sta->win[scale_rate_index],
510 0, current_count, scale_rate_index);
511 IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
512 scale_rate_index, current_count);
513
514 retries -= current_count;
515
516 scale_rate_index = last_index;
517 }
518
519
520 /* Update the last index window with success/failure based on ACK */
521 IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
522 last_index,
523 (info->flags & IEEE80211_TX_STAT_ACK) ?
524 "success" : "failure");
525 iwl3945_collect_tx_data(rs_sta,
526 &rs_sta->win[last_index],
527 info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
528
529 /* We updated the rate scale window -- if its been more than
530 * flush_time since the last run, schedule the flush
531 * again */
532 spin_lock_irqsave(&rs_sta->lock, flags);
533
534 if (!rs_sta->flush_pending &&
535 time_after(jiffies, rs_sta->last_flush +
536 rs_sta->flush_time)) {
537
538 rs_sta->last_partial_flush = jiffies;
539 rs_sta->flush_pending = 1;
540 mod_timer(&rs_sta->rate_scale_flush,
541 jiffies + rs_sta->flush_time);
542 }
543
544 spin_unlock_irqrestore(&rs_sta->lock, flags);
545
546 IWL_DEBUG_RATE(priv, "leave\n");
547}
548
/*
 * Find the nearest usable lower and higher rates around 'index', honoring
 * 'rate_mask'.  Returns (high << 8) | low; either byte may be
 * IWL_RATE_INVALID when no neighbor exists in the mask.
 */
static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
				     u8 index, u16 rate_mask, enum ieee80211_band band)
{
	u8 high = IWL_RATE_INVALID;
	u8 low = IWL_RATE_INVALID;
	struct iwl_priv *priv __maybe_unused = rs_sta->priv;

	/* 802.11A walks to the next literal adjacent rate in
	 * the rate table */
	if (unlikely(band == IEEE80211_BAND_5GHZ)) {
		int i;
		u32 mask;

		/* Find the previous rate that is in the rate mask */
		/* NOTE(review): if index were 0, (1 << -1) would be undefined
		 * behavior; presumably 5 GHz indices always start at
		 * IWL_FIRST_OFDM_RATE (> 0) — confirm against callers. */
		i = index - 1;
		for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
			if (rate_mask & mask) {
				low = i;
				break;
			}
		}

		/* Find the next rate that is in the rate mask */
		i = index + 1;
		for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
		     i++, mask <<= 1) {
			if (rate_mask & mask) {
				high = i;
				break;
			}
		}

		return (high << 8) | low;
	}

	/* 2.4 GHz: follow the prev/next links in the rate table, using the
	 * TGG (protection) chain when rs_sta->tgg is set. */
	low = index;
	while (low != IWL_RATE_INVALID) {
		if (rs_sta->tgg)
			low = iwl3945_rates[low].prev_rs_tgg;
		else
			low = iwl3945_rates[low].prev_rs;
		if (low == IWL_RATE_INVALID)
			break;
		if (rate_mask & (1 << low))
			break;
		IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
	}

	high = index;
	while (high != IWL_RATE_INVALID) {
		if (rs_sta->tgg)
			high = iwl3945_rates[high].next_rs_tgg;
		else
			high = iwl3945_rates[high].next_rs;
		if (high == IWL_RATE_INVALID)
			break;
		if (rate_mask & (1 << high))
			break;
		IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
	}

	return (high << 8) | low;
}
612
/**
 * iwl3945_rs_get_rate - find the rate for the requested packet
 *
 * Returns the ieee80211_rate structure allocated by the driver.
 *
 * The rate control algorithm has no internal mapping between hw_mode's
 * rate ordering and the rate ordering used by the rate control algorithm.
 *
 * The rate control algorithm uses a single table of rates that goes across
 * the entire A/B/G spectrum vs. being limited to just one particular
 * hw_mode.
 *
 * As such, we can't convert the index obtained below into the hw_mode's
 * rate table and must reference the driver allocated rate table
 *
 */
static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
				void *priv_sta, struct ieee80211_tx_rate_control *txrc)
{
	struct ieee80211_supported_band *sband = txrc->sband;
	struct sk_buff *skb = txrc->skb;
	u8 low = IWL_RATE_INVALID;
	u8 high = IWL_RATE_INVALID;
	u16 high_low;
	int index;
	struct iwl3945_rs_sta *rs_sta = priv_sta;
	struct iwl3945_rate_scale_data *window = NULL;
	int current_tpt = IWL_INVALID_VALUE;
	int low_tpt = IWL_INVALID_VALUE;
	int high_tpt = IWL_INVALID_VALUE;
	u32 fail_count;
	s8 scale_action = 0;
	unsigned long flags;
	u16 rate_mask;
	s8 max_rate_idx = -1;
	struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	IWL_DEBUG_RATE(priv, "enter\n");

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (rs_sta && !rs_sta->priv) {
		IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
		priv_sta = NULL;
	}

	/* mac80211 handles low-rate cases (management frames, no STA data);
	 * if it did, there is nothing left for us to choose. */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	rate_mask = sta->supp_rates[sband->band];

	/* get user max rate if set */
	max_rate_idx = txrc->max_rate_idx;
	if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
		max_rate_idx += IWL_FIRST_OFDM_RATE;
	if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
		max_rate_idx = -1;

	index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);

	/* 5 GHz rate indices are offset by IWL_FIRST_OFDM_RATE */
	if (sband->band == IEEE80211_BAND_5GHZ)
		rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* for recent assoc, choose best rate regarding
	 * to rssi value
	 */
	if (rs_sta->start_rate != IWL_RATE_INVALID) {
		if (rs_sta->start_rate < index &&
		   (rate_mask & (1 << rs_sta->start_rate)))
			index = rs_sta->start_rate;
		rs_sta->start_rate = IWL_RATE_INVALID;
	}

	/* force user max rate if set by user */
	if ((max_rate_idx != -1) && (max_rate_idx < index)) {
		if (rate_mask & (1 << max_rate_idx))
			index = max_rate_idx;
	}

	window = &(rs_sta->win[index]);

	fail_count = window->counter - window->success_counter;

	/* Not enough history on the current rate to scale yet: keep the
	 * current index and bail out via "out". */
	if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
	     (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
		spin_unlock_irqrestore(&rs_sta->lock, flags);

		IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
			       "counter: %d, success_counter: %d, "
			       "expected_tpt is %sNULL\n",
			       index,
			       window->counter,
			       window->success_counter,
			       rs_sta->expected_tpt ? "not " : "");

		/* Can't calculate this yet; not enough history */
		window->average_tpt = IWL_INVALID_VALUE;
		goto out;

	}

	current_tpt = window->average_tpt;

	high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
					     sband->band);
	low = high_low & 0xff;
	high = (high_low >> 8) & 0xff;

	/* If user set max rate, dont allow higher than user constrain */
	if ((max_rate_idx != -1) && (max_rate_idx < high))
		high = IWL_RATE_INVALID;

	/* Collect Measured throughputs of adjacent rates */
	if (low != IWL_RATE_INVALID)
		low_tpt = rs_sta->win[low].average_tpt;

	if (high != IWL_RATE_INVALID)
		high_tpt = rs_sta->win[high].average_tpt;

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	/* Decide direction: -1 = step down, 0 = stay, +1 = step up. */
	scale_action = 0;

	/* Low success ratio , need to drop the rate */
	if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
		IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
		scale_action = -1;
	/* No throughput measured yet for adjacent rates,
	 * try increase */
	} else if ((low_tpt == IWL_INVALID_VALUE) &&
		   (high_tpt == IWL_INVALID_VALUE)) {

		if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
			scale_action = 1;
		else if (low != IWL_RATE_INVALID)
			scale_action = 0;

	/* Both adjacent throughputs are measured, but neither one has
	 * better throughput; we're using the best rate, don't change
	 * it! */
	} else if ((low_tpt != IWL_INVALID_VALUE) &&
		   (high_tpt != IWL_INVALID_VALUE) &&
		   (low_tpt < current_tpt) && (high_tpt < current_tpt)) {

		IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
			       "current_tpt [%d]\n",
			       low_tpt, high_tpt, current_tpt);
		scale_action = 0;

	/* At least one of the rates has better throughput */
	} else {
		if (high_tpt != IWL_INVALID_VALUE) {

			/* High rate has better throughput, Increase
			 * rate */
			if (high_tpt > current_tpt &&
			    window->success_ratio >= IWL_RATE_INCREASE_TH)
				scale_action = 1;
			else {
				IWL_DEBUG_RATE(priv,
					       "decrease rate because of high tpt\n");
				scale_action = 0;
			}
		} else if (low_tpt != IWL_INVALID_VALUE) {
			if (low_tpt > current_tpt) {
				IWL_DEBUG_RATE(priv,
					       "decrease rate because of low tpt\n");
				scale_action = -1;
			} else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
				/* Lower rate has better
				 * throughput,decrease rate */
				scale_action = 1;
			}
		}
	}

	/* Sanity check; asked for decrease, but success rate or throughput
	 * has been good at old rate. Don't change it. */
	if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
	    ((window->success_ratio > IWL_RATE_HIGH_TH) ||
	     (current_tpt > (100 * rs_sta->expected_tpt[low]))))
		scale_action = 0;

	switch (scale_action) {
	case -1:

		/* Decrese rate */
		if (low != IWL_RATE_INVALID)
			index = low;
		break;

	case 1:
		/* Increase rate */
		if (high != IWL_RATE_INVALID)
			index = high;

		break;

	case 0:
	default:
		/* No change */
		break;
	}

	IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
		       index, scale_action, low, high);

 out:

	/* Translate our table index back to mac80211's per-band index
	 * (5 GHz band indices start at IWL_FIRST_OFDM_RATE). */
	if (sband->band == IEEE80211_BAND_5GHZ) {
		if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
			index = IWL_FIRST_OFDM_RATE;
		rs_sta->last_txrate_idx = index;
		info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
	} else {
		rs_sta->last_txrate_idx = index;
		info->control.rates[0].idx = rs_sta->last_txrate_idx;
	}

	IWL_DEBUG_RATE(priv, "leave: %d\n", index);
}
836
837#ifdef CONFIG_MAC80211_DEBUGFS
/* Generic debugfs open: hand the inode's private pointer to read(). */
static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
843
/*
 * debugfs read for "rate_stats_table": dump the per-rate sliding-window
 * counters for one station.
 */
static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
						  char __user *user_buf,
						  size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int j;
	ssize_t ret;
	struct iwl3945_rs_sta *lq_sta = file->private_data;

	/* NOTE(review): the sprintf()s below are unbounded; 1024 bytes is
	 * assumed to comfortably fit the header plus IWL_RATE_COUNT_3945
	 * short lines — confirm if the format ever grows. */
	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
			"rate=0x%X flush time %d\n",
			lq_sta->tx_packets,
			lq_sta->last_txrate_idx,
			lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
	for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
		desc += sprintf(buff+desc,
				"counter=%d success=%d %%=%d\n",
				lq_sta->win[j].counter,
				lq_sta->win[j].success_counter,
				lq_sta->win[j].success_ratio);
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
874
/* File operations for the per-station "rate_stats_table" debugfs entry. */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = iwl3945_sta_dbgfs_stats_table_read,
	.open = iwl3945_open_file_generic,
	.llseek = default_llseek,
};
880
/*
 * Rate-control debugfs hook: create the per-station stats file under the
 * directory mac80211 provides.  The dentry is kept for removal later.
 */
static void iwl3945_add_debugfs(void *priv, void *priv_sta,
				struct dentry *dir)
{
	struct iwl3945_rs_sta *lq_sta = priv_sta;

	lq_sta->rs_sta_dbgfs_stats_table_file =
		debugfs_create_file("rate_stats_table", 0600, dir,
				    lq_sta, &rs_sta_dbgfs_stats_table_ops);

}
891
/* Rate-control debugfs hook: tear down the per-station stats file. */
static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
{
	struct iwl3945_rs_sta *lq_sta = priv_sta;
	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
}
897#endif
898
/*
 * Initialization of rate scaling information is done by driver after
 * the station is added. Since mac80211 calls this function before a
 * station is added we ignore it.
 * (The real work happens in iwl3945_rs_rate_init(), invoked by the
 * driver once the station entry exists in uCode.)
 */
static void iwl3945_rs_rate_init_stub(void *priv_r,
				struct ieee80211_supported_band *sband,
				struct ieee80211_sta *sta, void *priv_sta)
{
}
909
/* mac80211 rate-control plug-in descriptor for the 3945. */
static struct rate_control_ops rs_ops = {
	.module = NULL,
	.name = RS_NAME,
	.tx_status = iwl3945_rs_tx_status,
	.get_rate = iwl3945_rs_get_rate,
	.rate_init = iwl3945_rs_rate_init_stub,
	.alloc = iwl3945_rs_alloc,
	.free = iwl3945_rs_free,
	.alloc_sta = iwl3945_rs_alloc_sta,
	.free_sta = iwl3945_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = iwl3945_add_debugfs,
	.remove_sta_debugfs = iwl3945_remove_debugfs,
#endif

};
/*
 * Seed rate scaling for a station after association: pick the
 * expected-throughput table for the current band/protection mode and
 * choose a starting rate from the last observed Rx RSSI.
 */
void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
{
	struct iwl_priv *priv = hw->priv;
	s32 rssi = 0;
	unsigned long flags;
	struct iwl3945_rs_sta *rs_sta;
	struct ieee80211_sta *sta;
	struct iwl3945_sta_priv *psta;

	IWL_DEBUG_RATE(priv, "enter\n");

	/* RCU protects the mac80211 station lookup below. */
	rcu_read_lock();

	sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
				 priv->stations[sta_id].sta.sta.addr);
	if (!sta) {
		IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
		rcu_read_unlock();
		return;
	}

	psta = (void *) sta->drv_priv;
	rs_sta = &psta->rs_sta;

	spin_lock_irqsave(&rs_sta->lock, flags);

	rs_sta->tgg = 0;
	switch (priv->band) {
	case IEEE80211_BAND_2GHZ:
		/* TODO: this always does G, not a regression */
		if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
						RXON_FLG_TGG_PROTECT_MSK) {
			rs_sta->tgg = 1;
			rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
		} else
			rs_sta->expected_tpt = iwl3945_expected_tpt_g;
		break;

	case IEEE80211_BAND_5GHZ:
		rs_sta->expected_tpt = iwl3945_expected_tpt_a;
		break;
	case IEEE80211_NUM_BANDS:
		/* Not a real band; reaching this is a driver bug. */
		BUG();
		break;
	}

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	/* Fall back to the minimum usable RSSI if none was recorded yet. */
	rssi = priv->_3945.last_rx_rssi;
	if (rssi == 0)
		rssi = IWL_MIN_RSSI_VAL;

	IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);

	rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);

	IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
		       "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
		       iwl3945_rates[rs_sta->start_rate].plcp);
	rcu_read_unlock();
}
987
/* Register the 3945 rate-control algorithm with mac80211 (module init). */
int iwl3945_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_ops);
}
992
/* Unregister the 3945 rate-control algorithm (module exit). */
void iwl3945_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_ops);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
deleted file mode 100644
index f7c0a7438476..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ /dev/null
@@ -1,2741 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/sched.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/firmware.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40#include <net/mac80211.h>
41
42#include "iwl-fh.h"
43#include "iwl-3945-fh.h"
44#include "iwl-commands.h"
45#include "iwl-sta.h"
46#include "iwl-3945.h"
47#include "iwl-eeprom.h"
48#include "iwl-core.h"
49#include "iwl-helpers.h"
50#include "iwl-led.h"
51#include "iwl-3945-led.h"
52#include "iwl-3945-debugfs.h"
53
/*
 * Build one iwl3945_rate_info entry: PLCP value, IEEE rate value, then the
 * prev/next links for the IEEE (ip/in), rate-scale (rp/rn) and TGG-protected
 * rate-scale (pp/np) chains, plus the table index and its predecessor.
 */
#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,   \
				    IWL_RATE_##r##M_IEEE,   \
				    IWL_RATE_##ip##M_INDEX, \
				    IWL_RATE_##in##M_INDEX, \
				    IWL_RATE_##rp##M_INDEX, \
				    IWL_RATE_##rn##M_INDEX, \
				    IWL_RATE_##pp##M_INDEX, \
				    IWL_RATE_##np##M_INDEX, \
				    IWL_RATE_##r##M_INDEX_TABLE, \
				    IWL_RATE_##ip##M_INDEX_TABLE }
65
/*
 * Parameter order:
 * rate, prev ieee rate, next ieee rate, prev rate-scale rate,
 * next rate-scale rate, prev TGG rate, next TGG rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
	IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),    /*  1mbps */
	IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),          /*  2mbps */
	IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),       /*  9mbps */
	IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
};
88
89static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
90{
91 u8 rate = iwl3945_rates[rate_index].prev_ieee;
92
93 if (rate == IWL_RATE_INVALID)
94 rate = rate_index;
95 return rate;
96}
97
/* 1 = enable the iwl3945_disable_events() function */
#define IWL_EVT_DISABLE (0)
#define IWL_EVT_DISABLE_SIZE (1532/32)

/**
 * iwl3945_disable_events - Disable selected events in uCode event log
 *
 * Disable an event by writing "1"s into "disable"
 * bitmap in SRAM. Bit position corresponds to Event # (id/type).
 * Default values of 0 enable uCode events to be logged.
 * Use for only special debugging. This function is just a placeholder as-is,
 * you'll need to provide the special bits! ...
 * ... and set IWL_EVT_DISABLE to 1. */
void iwl3945_disable_events(struct iwl_priv *priv)
{
	int i;
	u32 base;		/* SRAM address of event log header */
	u32 disable_ptr;	/* SRAM address of event-disable bitmap array */
	u32 array_size;		/* # of u32 entries in array */
	/* All-zero by default: no events disabled.  With IWL_EVT_DISABLE == 0
	 * the write branch below is compiled out, so this table is inert. */
	static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
		0x00000000,	/*   31 -    0  Event id numbers */
		0x00000000,	/*   63 -   32 */
		0x00000000,	/*   95 -   64 */
		0x00000000,	/*  127 -   96 */
		0x00000000,	/*  159 -  128 */
		0x00000000,	/*  191 -  160 */
		0x00000000,	/*  223 -  192 */
		0x00000000,	/*  255 -  224 */
		0x00000000,	/*  287 -  256 */
		0x00000000,	/*  319 -  288 */
		0x00000000,	/*  351 -  320 */
		0x00000000,	/*  383 -  352 */
		0x00000000,	/*  415 -  384 */
		0x00000000,	/*  447 -  416 */
		0x00000000,	/*  479 -  448 */
		0x00000000,	/*  511 -  480 */
		0x00000000,	/*  543 -  512 */
		0x00000000,	/*  575 -  544 */
		0x00000000,	/*  607 -  576 */
		0x00000000,	/*  639 -  608 */
		0x00000000,	/*  671 -  640 */
		0x00000000,	/*  703 -  672 */
		0x00000000,	/*  735 -  704 */
		0x00000000,	/*  767 -  736 */
		0x00000000,	/*  799 -  768 */
		0x00000000,	/*  831 -  800 */
		0x00000000,	/*  863 -  832 */
		0x00000000,	/*  895 -  864 */
		0x00000000,	/*  927 -  896 */
		0x00000000,	/*  959 -  928 */
		0x00000000,	/*  991 -  960 */
		0x00000000,	/* 1023 -  992 */
		0x00000000,	/* 1055 - 1024 */
		0x00000000,	/* 1087 - 1056 */
		0x00000000,	/* 1119 - 1088 */
		0x00000000,	/* 1151 - 1120 */
		0x00000000,	/* 1183 - 1152 */
		0x00000000,	/* 1215 - 1184 */
		0x00000000,	/* 1247 - 1216 */
		0x00000000,	/* 1279 - 1248 */
		0x00000000,	/* 1311 - 1280 */
		0x00000000,	/* 1343 - 1312 */
		0x00000000,	/* 1375 - 1344 */
		0x00000000,	/* 1407 - 1376 */
		0x00000000,	/* 1439 - 1408 */
		0x00000000,	/* 1471 - 1440 */
		0x00000000,	/* 1503 - 1472 */
	};

	base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	if (!iwl3945_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
		return;
	}

	/* Words 4 and 5 of the log header hold the disable-bitmap address
	 * and its length (in u32s). */
	disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
	array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));

	if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
		IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
			       disable_ptr);
		for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
			iwl_legacy_write_targ_mem(priv,
					   disable_ptr + (i * sizeof(u32)),
					   evt_disable[i]);

	} else {
		IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
		IWL_DEBUG_INFO(priv, "  by writing \"1\"s into disable bitmap\n");
		IWL_DEBUG_INFO(priv, "  in SRAM at 0x%x, size %d u32s\n",
			       disable_ptr, array_size);
	}

}
192
193static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
194{
195 int idx;
196
197 for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
198 if (iwl3945_rates[idx].plcp == plcp)
199 return idx;
200 return -1;
201}
202
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x

/* Map a Tx response status word to a human-readable failure name
 * (debug builds only; the non-debug stub below returns ""). */
static const char *iwl3945_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_3945_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
#else
static inline const char *iwl3945_get_tx_fail_reason(u32 status)
{
	return "";
}
#endif
237
/*
 * get ieee prev rate from rate scale table.
 * for A and B mode we need to override the
 * looked-up value for some band-specific cases.
 */
int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
{
	int next_rate = iwl3945_get_prev_ieee_rate(rate);

	switch (priv->band) {
	case IEEE80211_BAND_5GHZ:
		if (rate == IWL_RATE_12M_INDEX)
			next_rate = IWL_RATE_9M_INDEX;
		else if (rate == IWL_RATE_6M_INDEX)
			/* 6M is the lowest OFDM rate: clamp (map to itself). */
			next_rate = IWL_RATE_6M_INDEX;
		break;
	case IEEE80211_BAND_2GHZ:
		/* CCK-only peer: step 11M straight down to 5.5M. */
		if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
		    iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
			if (rate == IWL_RATE_11M_INDEX)
				next_rate = IWL_RATE_5M_INDEX;
		}
		break;

	default:
		break;
	}

	return next_rate;
}
268
269
/**
 * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
				     int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;

	/* The command queue is reclaimed elsewhere; reaching here with it
	 * is a driver bug. */
	BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);

	/* Walk read_ptr forward (with wrap) up to, but not including, the
	 * entry AFTER 'index', completing each frame back to mac80211. */
	for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
		q->read_ptr != index;
		q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
		tx_info->skb = NULL;
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}

	if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
			(txq_id != IWL39_CMD_QUEUE_NUM) &&
			priv->mac80211_registered)
		iwl_legacy_wake_queue(priv, txq);
}
301
302/**
303 * iwl3945_rx_reply_tx - Handle Tx response
304 */
305static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
306 struct iwl_rx_mem_buffer *rxb)
307{
308 struct iwl_rx_packet *pkt = rxb_addr(rxb);
309 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
310 int txq_id = SEQ_TO_QUEUE(sequence);
311 int index = SEQ_TO_INDEX(sequence);
312 struct iwl_tx_queue *txq = &priv->txq[txq_id];
313 struct ieee80211_tx_info *info;
314 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
315 u32 status = le32_to_cpu(tx_resp->status);
316 int rate_idx;
317 int fail;
318
319 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
320 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
321 "is out of range [0-%d] %d %d\n", txq_id,
322 index, txq->q.n_bd, txq->q.write_ptr,
323 txq->q.read_ptr);
324 return;
325 }
326
327 txq->time_stamp = jiffies;
328 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
329 ieee80211_tx_info_clear_status(info);
330
331 /* Fill the MRR chain with some info about on-chip retransmissions */
332 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
333 if (info->band == IEEE80211_BAND_5GHZ)
334 rate_idx -= IWL_FIRST_OFDM_RATE;
335
336 fail = tx_resp->failure_frame;
337
338 info->status.rates[0].idx = rate_idx;
339 info->status.rates[0].count = fail + 1; /* add final attempt */
340
341 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
342 info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
343 IEEE80211_TX_STAT_ACK : 0;
344
345 IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
346 txq_id, iwl3945_get_tx_fail_reason(status), status,
347 tx_resp->rate, tx_resp->failure_frame);
348
349 IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
350 iwl3945_tx_queue_reclaim(priv, txq_id, index);
351
352 if (status & TX_ABORT_REQUIRED_MSK)
353 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
354}
355
356
357
358/*****************************************************************************
359 *
360 * Intel PRO/Wireless 3945ABG/BG Network Connection
361 *
362 * RX handler implementations
363 *
364 *****************************************************************************/
365#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
366static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
367 __le32 *stats)
368{
369 int i;
370 __le32 *prev_stats;
371 u32 *accum_stats;
372 u32 *delta, *max_delta;
373
374 prev_stats = (__le32 *)&priv->_3945.statistics;
375 accum_stats = (u32 *)&priv->_3945.accum_statistics;
376 delta = (u32 *)&priv->_3945.delta_statistics;
377 max_delta = (u32 *)&priv->_3945.max_delta;
378
379 for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
380 i += sizeof(__le32), stats++, prev_stats++, delta++,
381 max_delta++, accum_stats++) {
382 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
383 *delta = (le32_to_cpu(*stats) -
384 le32_to_cpu(*prev_stats));
385 *accum_stats += *delta;
386 if (*delta > *max_delta)
387 *max_delta = *delta;
388 }
389 }
390
391 /* reset accumulative statistics for "no-counter" type statistics */
392 priv->_3945.accum_statistics.general.temperature =
393 priv->_3945.statistics.general.temperature;
394 priv->_3945.accum_statistics.general.ttl_timestamp =
395 priv->_3945.statistics.general.ttl_timestamp;
396}
397#endif
398
399void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
400 struct iwl_rx_mem_buffer *rxb)
401{
402 struct iwl_rx_packet *pkt = rxb_addr(rxb);
403
404 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
405 (int)sizeof(struct iwl3945_notif_statistics),
406 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
407#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
408 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
409#endif
410
411 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
412}
413
414void iwl3945_reply_statistics(struct iwl_priv *priv,
415 struct iwl_rx_mem_buffer *rxb)
416{
417 struct iwl_rx_packet *pkt = rxb_addr(rxb);
418 __le32 *flag = (__le32 *)&pkt->u.raw;
419
420 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
421#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
422 memset(&priv->_3945.accum_statistics, 0,
423 sizeof(struct iwl3945_notif_statistics));
424 memset(&priv->_3945.delta_statistics, 0,
425 sizeof(struct iwl3945_notif_statistics));
426 memset(&priv->_3945.max_delta, 0,
427 sizeof(struct iwl3945_notif_statistics));
428#endif
429 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
430 }
431 iwl3945_hw_rx_statistics(priv, rxb);
432}
433
434
435/******************************************************************************
436 *
437 * Misc. internal state and helper functions
438 *
439 ******************************************************************************/
440
441/* This is necessary only for a number of statistics, see the caller. */
442static int iwl3945_is_network_packet(struct iwl_priv *priv,
443 struct ieee80211_hdr *header)
444{
445 /* Filter incoming packets to determine if they are targeted toward
446 * this network, discarding packets coming from ourselves */
447 switch (priv->iw_mode) {
448 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
449 /* packets to our IBSS update information */
450 return !compare_ether_addr(header->addr3, priv->bssid);
451 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
452 /* packets to our IBSS update information */
453 return !compare_ether_addr(header->addr2, priv->bssid);
454 default:
455 return 1;
456 }
457}
458
/*
 * Hand one received frame to mac80211: sanity-check the length, attach
 * the rx page as a fragment of a small skb, copy in the rx status, and
 * transfer page ownership to the stack.
 */
static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb,
				   struct ieee80211_rx_status *stats)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u16 len = le16_to_cpu(rx_hdr->len);
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* Reject frames whose claimed length would overflow the rx page
	 * buffer (the old "stop the watchdog" comment here was stale). */
	if (unlikely(len + IWL39_RX_FRAME_SIZE >
		     PAGE_SIZE << priv->hw_params.rx_page_order)) {
		IWL_DEBUG_DROP(priv, "Corruption detected!\n");
		return;
	}

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
			"Dropping packet while interface is not open.\n");
		return;
	}

	/* Small head-only skb; the payload rides along as a page fragment. */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	if (!iwl3945_mod_params.sw_crypto)
		iwl_legacy_set_decrypted_flag(priv,
				(struct ieee80211_hdr *)rxb_addr(rxb),
				le32_to_cpu(rx_end->status), stats);

	skb_add_rx_frag(skb, 0, rxb->page,
			(void *)rx_hdr->payload - (void *)pkt, len);

	iwl_legacy_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	/* Page ownership moves to the skb; drop our bookkeeping reference. */
	ieee80211_rx(priv->hw, skb);
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
506
507#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
508
509static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
510 struct iwl_rx_mem_buffer *rxb)
511{
512 struct ieee80211_hdr *header;
513 struct ieee80211_rx_status rx_status;
514 struct iwl_rx_packet *pkt = rxb_addr(rxb);
515 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
516 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
517 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
518 u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
519 u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
520 u8 network_packet;
521
522 rx_status.flag = 0;
523 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
524 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
525 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
526 rx_status.freq =
527 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
528 rx_status.band);
529
530 rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
531 if (rx_status.band == IEEE80211_BAND_5GHZ)
532 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
533
534 rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
535 RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
536
537 /* set the preamble flag if appropriate */
538 if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
539 rx_status.flag |= RX_FLAG_SHORTPRE;
540
541 if ((unlikely(rx_stats->phy_count > 20))) {
542 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
543 rx_stats->phy_count);
544 return;
545 }
546
547 if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
548 || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
549 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
550 return;
551 }
552
553
554
555 /* Convert 3945's rssi indicator to dBm */
556 rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
557
558 IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
559 rx_status.signal, rx_stats_sig_avg,
560 rx_stats_noise_diff);
561
562 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
563
564 network_packet = iwl3945_is_network_packet(priv, header);
565
566 IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
567 network_packet ? '*' : ' ',
568 le16_to_cpu(rx_hdr->channel),
569 rx_status.signal, rx_status.signal,
570 rx_status.rate_idx);
571
572 iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
573 header);
574
575 if (network_packet) {
576 priv->_3945.last_beacon_time =
577 le32_to_cpu(rx_end->beacon_timestamp);
578 priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
579 priv->_3945.last_rx_rssi = rx_status.signal;
580 }
581
582 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
583}
584
/**
 * iwl3945_hw_txq_attach_buf_to_tfd - Attach one DMA buffer to a TFD
 *
 * Appends the (addr, len) chunk to the TFD at the queue's write pointer.
 * @reset: if non-zero, zero the TFD before adding the chunk
 * @pad: value stored in the TFD control word's pad field
 *
 * Returns 0 on success, -EINVAL if the TFD already holds NUM_TFD_CHUNKS
 * chunks (or its count field is corrupt).
 */
int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				     struct iwl_tx_queue *txq,
				     dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	int count;
	struct iwl_queue *q;
	struct iwl3945_tfd *tfd, *tfd_tmp;

	q = &txq->q;
	tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
	/* Work on the descriptor at the current write position. */
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	/* Current chunk count lives in the TFD control word. */
	count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));

	if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			  NUM_TFD_CHUNKS);
		return -EINVAL;
	}

	tfd->tbs[count].addr = cpu_to_le32(addr);
	tfd->tbs[count].len = cpu_to_le32(len);

	count++;

	/* Write back the incremented count together with the pad field. */
	tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
					 TFD_CTL_PAD_SET(pad));

	return 0;
}
618
619/**
620 * iwl3945_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr]
621 *
622 * Does NOT advance any indexes
623 */
624void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
625{
626 struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
627 int index = txq->q.read_ptr;
628 struct iwl3945_tfd *tfd = &tfd_tmp[index];
629 struct pci_dev *dev = priv->pci_dev;
630 int i;
631 int counter;
632
633 /* sanity check */
634 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
635 if (counter > NUM_TFD_CHUNKS) {
636 IWL_ERR(priv, "Too many chunks: %i\n", counter);
637 /* @todo issue fatal error, it is quite serious situation */
638 return;
639 }
640
641 /* Unmap tx_cmd */
642 if (counter)
643 pci_unmap_single(dev,
644 dma_unmap_addr(&txq->meta[index], mapping),
645 dma_unmap_len(&txq->meta[index], len),
646 PCI_DMA_TODEVICE);
647
648 /* unmap chunks if any */
649
650 for (i = 1; i < counter; i++)
651 pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
652 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
653
654 /* free SKB */
655 if (txq->txb) {
656 struct sk_buff *skb;
657
658 skb = txq->txb[txq->q.read_ptr].skb;
659
660 /* can be called from irqs-disabled context */
661 if (skb) {
662 dev_kfree_skb_any(skb);
663 txq->txb[txq->q.read_ptr].skb = NULL;
664 }
665 }
666}
667
668/**
669 * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
670 *
671*/
672void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
673 struct iwl_device_cmd *cmd,
674 struct ieee80211_tx_info *info,
675 struct ieee80211_hdr *hdr,
676 int sta_id, int tx_id)
677{
678 u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
679 u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
680 u16 rate_mask;
681 int rate;
682 u8 rts_retry_limit;
683 u8 data_retry_limit;
684 __le32 tx_flags;
685 __le16 fc = hdr->frame_control;
686 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
687
688 rate = iwl3945_rates[rate_index].plcp;
689 tx_flags = tx_cmd->tx_flags;
690
691 /* We need to figure out how to get the sta->supp_rates while
692 * in this running context */
693 rate_mask = IWL_RATES_MASK_3945;
694
695 /* Set retry limit on DATA packets and Probe Responses*/
696 if (ieee80211_is_probe_resp(fc))
697 data_retry_limit = 3;
698 else
699 data_retry_limit = IWL_DEFAULT_TX_RETRY;
700 tx_cmd->data_retry_limit = data_retry_limit;
701
702 if (tx_id >= IWL39_CMD_QUEUE_NUM)
703 rts_retry_limit = 3;
704 else
705 rts_retry_limit = 7;
706
707 if (data_retry_limit < rts_retry_limit)
708 rts_retry_limit = data_retry_limit;
709 tx_cmd->rts_retry_limit = rts_retry_limit;
710
711 tx_cmd->rate = rate;
712 tx_cmd->tx_flags = tx_flags;
713
714 /* OFDM */
715 tx_cmd->supp_rates[0] =
716 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
717
718 /* CCK */
719 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
720
721 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
722 "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
723 tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
724 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
725}
726
727static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
728{
729 unsigned long flags_spin;
730 struct iwl_station_entry *station;
731
732 if (sta_id == IWL_INVALID_STATION)
733 return IWL_INVALID_STATION;
734
735 spin_lock_irqsave(&priv->sta_lock, flags_spin);
736 station = &priv->stations[sta_id];
737
738 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
739 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
740 station->sta.mode = STA_CONTROL_MODIFY_MSK;
741 iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
742 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
743
744 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
745 sta_id, tx_rate);
746 return sta_id;
747}
748
/* Select the NIC's main (VMAIN) power source and wait for the switch. */
static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
			iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);

			iwl_poll_bit(priv, CSR_GPIO_IN,
				     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
				     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
		}
 */

	/* Program the power-source field, leaving other bits untouched. */
	iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			~APMG_PS_CTRL_MSK_PWR_SRC);

	/* Poll until the GPIO reflects the VMAIN source (up to 5000 uS). */
	iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
		     CSR_GPIO_IN_BIT_AUX_POWER, 5000);  /* uS */
}
773
/*
 * Program Rx DMA channel 0: point the flow handler at the RBD circular
 * buffer and read-pointer status area, then enable the channel with its
 * frame-size / interrupt settings. Always returns 0.
 */
static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
	iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
					rxq->rb_stts_dma);
	iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
	iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
		FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
		FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
		FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
		FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
		(RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
		FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
		(1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
		FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);

	/* fake read to flush all prev I/O */
	iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);

	return 0;
}
795
/*
 * Program the Tx scheduler and Tx DMA (flow handler) registers to their
 * operational defaults. Always returns 0.
 */
static int iwl3945_tx_reset(struct iwl_priv *priv)
{

	/* bypass mode */
	iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);

	/* RA 0 is active */
	iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);

	/* all 6 fifo are active */
	iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);

	/* Scheduler bypass / FIFO mapping registers. */
	iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
	iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
	iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
	iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);

	/* Tell the flow handler where the shared TFD circular buffers live. */
	iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
			priv->_3945.shared_phys);

	iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);


	return 0;
}
828
829/**
830 * iwl3945_txq_ctx_reset - Reset TX queue context
831 *
832 * Destroys all DMA structures and initialize them again
833 */
834static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
835{
836 int rc;
837 int txq_id, slots_num;
838
839 iwl3945_hw_txq_ctx_free(priv);
840
841 /* allocate tx queue structure */
842 rc = iwl_legacy_alloc_txq_mem(priv);
843 if (rc)
844 return rc;
845
846 /* Tx CMD queue */
847 rc = iwl3945_tx_reset(priv);
848 if (rc)
849 goto error;
850
851 /* Tx queue(s) */
852 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
853 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
854 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
855 rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
856 slots_num, txq_id);
857 if (rc) {
858 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
859 goto error;
860 }
861 }
862
863 return rc;
864
865 error:
866 iwl3945_hw_txq_ctx_free(priv);
867 return rc;
868}
869
870
871/*
872 * Start up 3945's basic functionality after it has been reset
873 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
874 * NOTE: This does not load uCode nor start the embedded processor
875 */
876static int iwl3945_apm_init(struct iwl_priv *priv)
877{
878 int ret = iwl_legacy_apm_init(priv);
879
880 /* Clear APMG (NIC's internal power management) interrupts */
881 iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
882 iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
883
884 /* Reset radio chip */
885 iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
886 APMG_PS_CTRL_VAL_RESET_REQ);
887 udelay(5);
888 iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
889 APMG_PS_CTRL_VAL_RESET_REQ);
890
891 return ret;
892}
893
/*
 * Configure HW-interface bits from PCI revision and EEPROM data:
 * radio board type (MB/MM), MRC SKU, board revision, and silicon
 * type (almgor M A/B). Logs RF-kill capabilities from the EEPROM.
 */
static void iwl3945_nic_config(struct iwl_priv *priv)
{
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	unsigned long flags;
	u8 rev_id = priv->pci_dev->revision;

	spin_lock_irqsave(&priv->lock, flags);

	/* Determine HW type */
	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);

	if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
		IWL_DEBUG_INFO(priv, "RTP type\n");
	else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
		IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
	} else {
		IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
	}

	if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
		IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
	} else
		IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");

	/* Board revision 0xD0 selects the alternate board-type bit. */
	if ((eeprom->board_revision & 0xF0) == 0xD0) {
		IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
			       eeprom->board_revision);
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	} else {
		IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
			       eeprom->board_revision);
		iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
			      CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	}

	/* Silicon type: almgor M version <= 1 is type A, else type B. */
	if (eeprom->almgor_m_version <= 1) {
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
		IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
			       eeprom->almgor_m_version);
	} else {
		IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
			       eeprom->almgor_m_version);
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");

	if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
}
955
/*
 * Bring the NIC hardware up: APM init, power source, NIC config,
 * Rx queue (re)allocation + DMA init, and Tx queue context reset.
 * Returns 0 on success or a negative errno.
 */
int iwl3945_hw_nic_init(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;

	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl3945_set_pwr_vmain(priv);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		rc = iwl_legacy_rx_queue_alloc(priv);
		if (rc) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl3945_rx_queue_reset(priv, rxq);

	/* Fill the ring with receive buffers before enabling DMA. */
	iwl3945_rx_replenish(priv);

	iwl3945_rx_init(priv, rxq);


	/* Look at using this instead:
	rxq->need_update = 1;
	iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
	*/

	/* Hand the (8-aligned) write pointer to the hardware. */
	iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);

	rc = iwl3945_txq_ctx_reset(priv);
	if (rc)
		return rc;

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
1000
1001/**
1002 * iwl3945_hw_txq_ctx_free - Free TXQ Context
1003 *
1004 * Destroy all TX DMA queues and structures
1005 */
1006void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1007{
1008 int txq_id;
1009
1010 /* Tx queues */
1011 if (priv->txq)
1012 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1013 txq_id++)
1014 if (txq_id == IWL39_CMD_QUEUE_NUM)
1015 iwl_legacy_cmd_queue_free(priv);
1016 else
1017 iwl_legacy_tx_queue_free(priv, txq_id);
1018
1019 /* free tx queue structure */
1020 iwl_legacy_txq_mem(priv);
1021}
1022
1023void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1024{
1025 int txq_id;
1026
1027 /* stop SCD */
1028 iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
1029 iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
1030
1031 /* reset TFD queues */
1032 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
1033 iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
1034 iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
1035 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1036 1000);
1037 }
1038
1039 iwl3945_hw_txq_ctx_free(priv);
1040}
1041
1042/**
1043 * iwl3945_hw_reg_adjust_power_by_temp
1044 * return index delta into power gain settings table
1045*/
1046static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
1047{
1048 return (new_reading - old_reading) * (-11) / 100;
1049}
1050
1051/**
1052 * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
1053 */
1054static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
1055{
1056 return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
1057}
1058
/* Return the raw temperature reading from CSR_UCODE_DRV_GP2
 * (raw units, not the "human readable" converted value). */
int iwl3945_hw_get_temperature(struct iwl_priv *priv)
{
	return iwl_read32(priv, CSR_UCODE_DRV_GP2);
}
1063
1064/**
1065 * iwl3945_hw_reg_txpower_get_temperature
1066 * get the current temperature by reading from NIC
1067*/
1068static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
1069{
1070 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1071 int temperature;
1072
1073 temperature = iwl3945_hw_get_temperature(priv);
1074
1075 /* driver's okay range is -260 to +25.
1076 * human readable okay range is 0 to +285 */
1077 IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
1078
1079 /* handle insane temp reading */
1080 if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
1081 IWL_ERR(priv, "Error bad temperature value %d\n", temperature);
1082
1083 /* if really really hot(?),
1084 * substitute the 3rd band/group's temp measured at factory */
1085 if (priv->last_temperature > 100)
1086 temperature = eeprom->groups[2].temperature;
1087 else /* else use most recent "sane" value from driver */
1088 temperature = priv->last_temperature;
1089 }
1090
1091 return temperature; /* raw, not "human readable" */
1092}
1093
1094/* Adjust Txpower only if temperature variance is greater than threshold.
1095 *
1096 * Both are lower than older versions' 9 degrees */
1097#define IWL_TEMPERATURE_LIMIT_TIMER 6
1098
1099/**
1100 * iwl3945_is_temp_calib_needed - determines if new calibration is needed
1101 *
1102 * records new temperature in tx_mgr->temperature.
1103 * replaces tx_mgr->last_temperature *only* if calib needed
1104 * (assumes caller will actually do the calibration!). */
1105static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
1106{
1107 int temp_diff;
1108
1109 priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
1110 temp_diff = priv->temperature - priv->last_temperature;
1111
1112 /* get absolute value */
1113 if (temp_diff < 0) {
1114 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
1115 temp_diff = -temp_diff;
1116 } else if (temp_diff == 0)
1117 IWL_DEBUG_POWER(priv, "Same temp,\n");
1118 else
1119 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
1120
1121 /* if we don't need calibration, *don't* update last_temperature */
1122 if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
1123 IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
1124 return 0;
1125 }
1126
1127 IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
1128
1129 /* assume that caller will actually do calib ...
1130 * update the "last temperature" value */
1131 priv->last_temperature = priv->temperature;
1132 return 1;
1133}
1134
#define IWL_MAX_GAIN_ENTRIES 78
#define IWL_CCK_FROM_OFDM_POWER_DIFF  -5
#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)

/* radio and DSP power table, each step is 1/2 dB.
 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain.
 * Indexed by [band][gain index]; indices are clamped to
 * [0, IWL_MAX_GAIN_ENTRIES) by iwl3945_hw_reg_fix_power_index(). */
static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
	{
	 {251, 127},		/* 2.4 GHz, highest power */
	 {251, 127},
	 {251, 127},
	 {251, 127},
	 {251, 125},
	 {251, 110},
	 {251, 105},
	 {251, 98},
	 {187, 125},
	 {187, 115},
	 {187, 108},
	 {187, 99},
	 {243, 119},
	 {243, 111},
	 {243, 105},
	 {243, 97},
	 {243, 92},
	 {211, 106},
	 {211, 100},
	 {179, 120},
	 {179, 113},
	 {179, 107},
	 {147, 125},
	 {147, 119},
	 {147, 112},
	 {147, 106},
	 {147, 101},
	 {147, 97},
	 {147, 91},
	 {115, 107},
	 {235, 121},
	 {235, 115},
	 {235, 109},
	 {203, 127},
	 {203, 121},
	 {203, 115},
	 {203, 108},
	 {203, 102},
	 {203, 96},
	 {203, 92},
	 {171, 110},
	 {171, 104},
	 {171, 98},
	 {139, 116},
	 {227, 125},
	 {227, 119},
	 {227, 113},
	 {227, 107},
	 {227, 101},
	 {227, 96},
	 {195, 113},
	 {195, 106},
	 {195, 102},
	 {195, 95},
	 {163, 113},
	 {163, 106},
	 {163, 102},
	 {163, 95},
	 {131, 113},
	 {131, 106},
	 {131, 102},
	 {131, 95},
	 {99, 113},
	 {99, 106},
	 {99, 102},
	 {99, 95},
	 {67, 113},
	 {67, 106},
	 {67, 102},
	 {67, 95},
	 {35, 113},
	 {35, 106},
	 {35, 102},
	 {35, 95},
	 {3, 113},
	 {3, 106},
	 {3, 102},
	 {3, 95} },		/* 2.4 GHz, lowest power */
	{
	 {251, 127},		/* 5.x GHz, highest power */
	 {251, 120},
	 {251, 114},
	 {219, 119},
	 {219, 101},
	 {187, 113},
	 {187, 102},
	 {155, 114},
	 {155, 103},
	 {123, 117},
	 {123, 107},
	 {123, 99},
	 {123, 92},
	 {91, 108},
	 {59, 125},
	 {59, 118},
	 {59, 109},
	 {59, 102},
	 {59, 96},
	 {59, 90},
	 {27, 104},
	 {27, 98},
	 {27, 92},
	 {115, 118},
	 {115, 111},
	 {115, 104},
	 {83, 126},
	 {83, 121},
	 {83, 113},
	 {83, 105},
	 {83, 99},
	 {51, 118},
	 {51, 111},
	 {51, 104},
	 {51, 98},
	 {19, 116},
	 {19, 109},
	 {19, 102},
	 {19, 98},
	 {19, 93},
	 {171, 113},
	 {171, 107},
	 {171, 99},
	 {139, 120},
	 {139, 113},
	 {139, 107},
	 {139, 99},
	 {107, 120},
	 {107, 113},
	 {107, 107},
	 {107, 99},
	 {75, 120},
	 {75, 113},
	 {75, 107},
	 {75, 99},
	 {43, 120},
	 {43, 113},
	 {43, 107},
	 {43, 99},
	 {11, 120},
	 {11, 113},
	 {11, 107},
	 {11, 99},
	 {131, 107},
	 {131, 99},
	 {99, 120},
	 {99, 113},
	 {99, 107},
	 {99, 99},
	 {67, 120},
	 {67, 113},
	 {67, 107},
	 {67, 99},
	 {35, 120},
	 {35, 113},
	 {35, 107},
	 {35, 99},
	 {3, 120} }		/* 5.x GHz, lowest power */
};
1301
1302static inline u8 iwl3945_hw_reg_fix_power_index(int index)
1303{
1304 if (index < 0)
1305 return 0;
1306 if (index >= IWL_MAX_GAIN_ENTRIES)
1307 return IWL_MAX_GAIN_ENTRIES - 1;
1308 return (u8) index;
1309}
1310
/* Kick off thermal recalibration check every 60 seconds */
#define REG_RECALIB_PERIOD (60)

/**
 * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
 *
 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
 * or 6 Mbit (OFDM) rates.
 */
static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
			       s32 rate_index, const s8 *clip_pwrs,
			       struct iwl_channel_info *ch_info,
			       int band_index)
{
	struct iwl3945_scan_power_info *scan_power_info;
	s8 power;
	u8 power_index;

	scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];

	/* use this channel group's 6Mbit clipping/saturation pwr,
	 *   but cap at regulatory scan power restriction (set during init
	 *   based on eeprom channel data) for this channel.  */
	power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);

	/* further cap at the user-requested power limit */
	power = min(power, priv->tx_power_user_lmt);
	scan_power_info->requested_power = power;

	/* find difference between new scan *power* and current "normal"
	 *   Tx *power* for 6Mb.  Use this difference (x2) to adjust the
	 *   current "normal" temperature-compensated Tx power *index* for
	 *   this rate (1Mb or 6Mb) to yield new temp-compensated scan power
	 *   *index*. */
	power_index = ch_info->power_info[rate_index].power_table_index
	    - (power - ch_info->power_info
	       [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;

	/* store reference index that we use when adjusting *all* scan
	 *   powers.  So we can accommodate user (all channel) or spectrum
	 *   management (single channel) power changes "between" temperature
	 *   feedback compensation procedures.
	 * don't force fit this reference index into gain table; it may be a
	 *   negative number.  This will help avoid errors when we're at
	 *   the lower bounds (highest gains, for warmest temperatures)
	 *   of the table. */

	/* don't exceed table bounds for "real" setting */
	power_index = iwl3945_hw_reg_fix_power_index(power_index);

	/* Commit the gain-table entry for this scan power. */
	scan_power_info->power_table_index = power_index;
	scan_power_info->tpc.tx_gain =
	    power_gain_table[band_index][power_index].tx_gain;
	scan_power_info->tpc.dsp_atten =
	    power_gain_table[band_index][power_index].dsp_atten;
}
1366
/**
 * iwl3945_send_tx_power - fill in Tx Power command with gain settings
 *
 * Configures power settings for all rates for the current channel,
 * using values from channel info struct, and send to NIC
 *
 * Return: 0 on success (or on a non-Tx channel, where nothing is sent),
 * -EAGAIN if the hardware is scanning, -EINVAL if the current channel has
 * no channel-info entry, otherwise the result of sending
 * REPLY_TX_PWR_TABLE_CMD.
 */
static int iwl3945_send_tx_power(struct iwl_priv *priv)
{
	int rate_idx, i;
	const struct iwl_channel_info *ch_info = NULL;
	struct iwl3945_txpowertable_cmd txpower = {
		/* channel is already __le16 in the RXON structure */
		.channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
	};
	u16 chan;

	/* refuse to change Tx power while a hardware scan is in progress */
	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
		      "TX Power requested while scanning!\n"))
		return -EAGAIN;

	chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);

	/* band encoding in the command: 0 = 5 GHz, 1 = 2.4 GHz */
	txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
	ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
	if (!ch_info) {
		IWL_ERR(priv,
			"Failed to get channel info for channel %d [%d]\n",
			chan, priv->band);
		return -EINVAL;
	}

	if (!iwl_legacy_is_channel_valid(ch_info)) {
		IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
				"non-Tx channel.\n");
		return 0;
	}

	/* fill cmd with power settings for all rates for current channel */
	/* Fill OFDM rate */
	for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
	     rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {

		txpower.power[i].tpc = ch_info->power_info[i].tpc;
		txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;

		IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
				le16_to_cpu(txpower.channel),
				txpower.band,
				txpower.power[i].tpc.tx_gain,
				txpower.power[i].tpc.dsp_atten,
				txpower.power[i].rate);
	}
	/* Fill CCK rates -- note i keeps counting from the OFDM loop, since
	 * the CCK entries follow the OFDM entries in power_info[] */
	for (rate_idx = IWL_FIRST_CCK_RATE;
	     rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
		txpower.power[i].tpc = ch_info->power_info[i].tpc;
		txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;

		IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
				le16_to_cpu(txpower.channel),
				txpower.band,
				txpower.power[i].tpc.tx_gain,
				txpower.power[i].tpc.dsp_atten,
				txpower.power[i].rate);
	}

	return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
				sizeof(struct iwl3945_txpowertable_cmd),
				&txpower);

}
1437
/**
 * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
 * @ch_info: Channel to update. Uses power_info.requested_power.
 *
 * Replace requested_power and base_power_index ch_info fields for
 * one channel.
 *
 * Called if user or spectrum management changes power preferences.
 * Takes into account h/w and modulation limitations (clip power).
 *
 * This does *not* send anything to NIC, just sets up ch_info for one channel.
 *
 * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
 *	properly fill out the scan powers, and actual h/w gain settings,
 *	and send changes to NIC
 *
 * Always returns 0.
 */
static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
			struct iwl_channel_info *ch_info)
{
	struct iwl3945_channel_power_info *power_info;
	int power_changed = 0;
	int i;
	const s8 *clip_pwrs;
	int power;

	/* Get this chnlgrp's rate-to-max/clip-powers table */
	clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;

	/* Get this channel's rate-to-current-power settings table */
	power_info = ch_info->power_info;

	/* update OFDM Txpower settings */
	for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
	     i++, ++power_info) {
		int delta_idx;

		/* limit new power to be no more than h/w capability */
		power = min(ch_info->curr_txpow, clip_pwrs[i]);
		if (power == power_info->requested_power)
			continue;

		/* find difference between old and new requested powers,
		 * update base (non-temp-compensated) power index
		 * (2 table steps per dBm) */
		delta_idx = (power - power_info->requested_power) * 2;
		power_info->base_power_index -= delta_idx;

		/* save new requested power value */
		power_info->requested_power = power;

		power_changed = 1;
	}

	/* update CCK Txpower settings, based on OFDM 12M setting ...
	 * ... all CCK power settings for a given channel are the *same*. */
	if (power_changed) {
		power =
		    ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
		    requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;

		/* do all CCK rates' iwl3945_channel_power_info structures.
		 * NOTE: the OFDM loop above left power_info pointing one past
		 * the 54M entry -- presumably the first CCK entry, since CCK
		 * rates follow OFDM rates in power_info[] (see
		 * iwl3945_txpower_set_from_eeprom) -- so ++power_info walks
		 * the CCK entries here. */
		for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
			power_info->requested_power = power;
			power_info->base_power_index =
			    ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
			    base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
			++power_info;
		}
	}

	return 0;
}
1509
1510/**
1511 * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
1512 *
1513 * NOTE: Returned power limit may be less (but not more) than requested,
1514 * based strictly on regulatory (eeprom and spectrum mgt) limitations
1515 * (no consideration for h/w clipping limitations).
1516 */
1517static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
1518{
1519 s8 max_power;
1520
1521#if 0
1522 /* if we're using TGd limits, use lower of TGd or EEPROM */
1523 if (ch_info->tgd_data.max_power != 0)
1524 max_power = min(ch_info->tgd_data.max_power,
1525 ch_info->eeprom.max_power_avg);
1526
1527 /* else just use EEPROM limits */
1528 else
1529#endif
1530 max_power = ch_info->eeprom.max_power_avg;
1531
1532 return min(max_power, ch_info->max_power_avg);
1533}
1534
/**
 * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
 *
 * Compensate txpower settings of *all* channels for temperature.
 * This only accounts for the difference between current temperature
 * and the factory calibration temperatures, and bases the new settings
 * on the channel's base_power_index.
 *
 * If RxOn is "associated", this sends the new Txpower to NIC!
 *
 * Returns 0 without touching anything when calibration is disabled or a
 * scan is in progress; otherwise the result of the lib send_tx_power op.
 */
static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch_info = NULL;
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	int delta_index;
	const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
	u8 a_band;
	u8 rate_index;
	u8 scan_tbl_index;
	u8 i;
	int ref_temp;
	int temperature = priv->temperature;

	if (priv->disable_tx_power_cal ||
	    test_bit(STATUS_SCANNING, &priv->status)) {
		/* do not perform tx power calibration */
		return 0;
	}

	/* set up new Tx power info for each and every channel, 2.4 and 5.x */
	for (i = 0; i < priv->channel_count; i++) {
		ch_info = &priv->channel_info[i];
		a_band = iwl_legacy_is_channel_a_band(ch_info);

		/* Get this chnlgrp's factory calibration temperature */
		ref_temp = (s16)eeprom->groups[ch_info->group_index].
		    temperature;

		/* get power index adjustment based on current and factory
		 * temps */
		delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
								ref_temp);

		/* set tx power value for all rates, OFDM and CCK */
		for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
		     rate_index++) {
			/* start from the factory-temperature base index */
			int power_idx =
			    ch_info->power_info[rate_index].base_power_index;

			/* temperature compensate */
			power_idx += delta_index;

			/* stay within table range */
			power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
			ch_info->power_info[rate_index].
			    power_table_index = (u8) power_idx;
			/* copy both tx_gain and dsp_atten in one struct copy */
			ch_info->power_info[rate_index].tpc =
			    power_gain_table[a_band][power_idx];
		}

		/* Get this chnlgrp's rate-to-max/clip-powers table */
		clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;

		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
		for (scan_tbl_index = 0;
		     scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
			s32 actual_index = (scan_tbl_index == 0) ?
			    IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
			iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
					   actual_index, clip_pwrs,
					   ch_info, a_band);
		}
	}

	/* send Txpower command for current channel to ucode */
	return priv->cfg->ops->lib->send_tx_power(priv);
}
1611
1612int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1613{
1614 struct iwl_channel_info *ch_info;
1615 s8 max_power;
1616 u8 a_band;
1617 u8 i;
1618
1619 if (priv->tx_power_user_lmt == power) {
1620 IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
1621 "limit: %ddBm.\n", power);
1622 return 0;
1623 }
1624
1625 IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
1626 priv->tx_power_user_lmt = power;
1627
1628 /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1629
1630 for (i = 0; i < priv->channel_count; i++) {
1631 ch_info = &priv->channel_info[i];
1632 a_band = iwl_legacy_is_channel_a_band(ch_info);
1633
1634 /* find minimum power of all user and regulatory constraints
1635 * (does not consider h/w clipping limitations) */
1636 max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
1637 max_power = min(power, max_power);
1638 if (max_power != ch_info->curr_txpow) {
1639 ch_info->curr_txpow = max_power;
1640
1641 /* this considers the h/w clipping limitations */
1642 iwl3945_hw_reg_set_new_power(priv, ch_info);
1643 }
1644 }
1645
1646 /* update txpower settings for all channels,
1647 * send to NIC if associated. */
1648 iwl3945_is_temp_calib_needed(priv);
1649 iwl3945_hw_reg_comp_txpower_temp(priv);
1650
1651 return 0;
1652}
1653
1654static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1655 struct iwl_rxon_context *ctx)
1656{
1657 int rc = 0;
1658 struct iwl_rx_packet *pkt;
1659 struct iwl3945_rxon_assoc_cmd rxon_assoc;
1660 struct iwl_host_cmd cmd = {
1661 .id = REPLY_RXON_ASSOC,
1662 .len = sizeof(rxon_assoc),
1663 .flags = CMD_WANT_SKB,
1664 .data = &rxon_assoc,
1665 };
1666 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1667 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1668
1669 if ((rxon1->flags == rxon2->flags) &&
1670 (rxon1->filter_flags == rxon2->filter_flags) &&
1671 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1672 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1673 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1674 return 0;
1675 }
1676
1677 rxon_assoc.flags = ctx->staging.flags;
1678 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1679 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1680 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1681 rxon_assoc.reserved = 0;
1682
1683 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
1684 if (rc)
1685 return rc;
1686
1687 pkt = (struct iwl_rx_packet *)cmd.reply_page;
1688 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
1689 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
1690 rc = -EIO;
1691 }
1692
1693 iwl_legacy_free_pages(priv, cmd.reply_page);
1694
1695 return rc;
1696}
1697
/**
 * iwl3945_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data. This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 *
 * Returns 0 on success; -EINVAL when shutting down or for an invalid RXON;
 * -1 when the uCode is not alive; -EIO when the rate table setup fails;
 * otherwise the error from the failing command.
 */
int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
	struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
	int rc = 0;
	bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EINVAL;

	/* NOTE(review): bare -1 rather than an errno value, unlike the
	 * -EINVAL above */
	if (!iwl_legacy_is_alive(priv))
		return -1;

	/* always get timestamp with Rx frame */
	staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;

	/* select antenna */
	staging_rxon->flags &=
	    ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
	staging_rxon->flags |= iwl3945_get_antenna_flags(priv);

	rc = iwl_legacy_check_rxon_cmd(priv, ctx);
	if (rc) {
		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl_legacy_full_rxon_required(priv,
			&priv->contexts[IWL_RXON_CTX_BSS])) {
		rc = iwl_legacy_send_rxon_assoc(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);
		if (rc) {
			IWL_ERR(priv, "Error setting RXON_ASSOC "
				  "configuration (%d).\n", rc);
			return rc;
		}

		/* device now matches staging; mirror into active */
		memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if tx power changed.
		 */
		iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		/*
		 * reserved4 and 5 could have been filled by the iwlcore code.
		 * Let's clear them before pushing to the 3945.
		 */
		active_rxon->reserved4 = 0;
		active_rxon->reserved5 = 0;
		rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl3945_rxon_cmd),
			      &priv->contexts[IWL_RXON_CTX_BSS].active);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (rc) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
				  "configuration (%d).\n", rc);
			return rc;
		}
		/* clear the uCode's station table, then re-upload the
		 * stations the driver knows about */
		iwl_legacy_clear_ucode_stations(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);
		iwl_legacy_restore_stations(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);
	}

	IWL_DEBUG_INFO(priv, "Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(staging_rxon->channel),
		       staging_rxon->bssid_addr);

	/*
	 * reserved4 and 5 could have been filled by the iwlcore code.
	 * Let's clear them before pushing to the 3945.
	 */
	staging_rxon->reserved4 = 0;
	staging_rxon->reserved5 = 0;

	iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);

	/* Apply the new configuration */
	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl3945_rxon_cmd),
			      staging_rxon);
	if (rc) {
		IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
		return rc;
	}

	memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));

	if (!new_assoc) {
		/* clear and re-upload the station table after the full RXON */
		iwl_legacy_clear_ucode_stations(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);
		iwl_legacy_restore_stations(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);
	}

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
	if (rc) {
		IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
		return rc;
	}

	/* Init the hardware's rate fallback order based on the band */
	rc = iwl3945_init_hw_rate_table(priv);
	if (rc) {
		IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
		return -EIO;
	}

	return 0;
}
1840
1841/**
1842 * iwl3945_reg_txpower_periodic - called when time to check our temperature.
1843 *
1844 * -- reset periodic timer
1845 * -- see if temp has changed enough to warrant re-calibration ... if so:
1846 * -- correct coeffs for temp (can reset temp timer)
1847 * -- save this temp as "last",
1848 * -- send new set of gain settings to NIC
1849 * NOTE: This should continue working, even when we're not associated,
1850 * so we can keep our internal table of scan powers current. */
1851void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1852{
1853 /* This will kick in the "brute force"
1854 * iwl3945_hw_reg_comp_txpower_temp() below */
1855 if (!iwl3945_is_temp_calib_needed(priv))
1856 goto reschedule;
1857
1858 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
1859 * This is based *only* on current temperature,
1860 * ignoring any previous power measurements */
1861 iwl3945_hw_reg_comp_txpower_temp(priv);
1862
1863 reschedule:
1864 queue_delayed_work(priv->workqueue,
1865 &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
1866}
1867
1868static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
1869{
1870 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1871 _3945.thermal_periodic.work);
1872
1873 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1874 return;
1875
1876 mutex_lock(&priv->mutex);
1877 iwl3945_reg_txpower_periodic(priv);
1878 mutex_unlock(&priv->mutex);
1879}
1880
1881/**
1882 * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
1883 * for the channel.
1884 *
1885 * This function is used when initializing channel-info structs.
1886 *
1887 * NOTE: These channel groups do *NOT* match the bands above!
1888 * These channel groups are based on factory-tested channels;
1889 * on A-band, EEPROM's "group frequency" entries represent the top
1890 * channel in each group 1-4. Group 5 All B/G channels are in group 0.
1891 */
1892static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
1893 const struct iwl_channel_info *ch_info)
1894{
1895 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1896 struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
1897 u8 group;
1898 u16 group_index = 0; /* based on factory calib frequencies */
1899 u8 grp_channel;
1900
1901 /* Find the group index for the channel ... don't use index 1(?) */
1902 if (iwl_legacy_is_channel_a_band(ch_info)) {
1903 for (group = 1; group < 5; group++) {
1904 grp_channel = ch_grp[group].group_channel;
1905 if (ch_info->channel <= grp_channel) {
1906 group_index = group;
1907 break;
1908 }
1909 }
1910 /* group 4 has a few channels *above* its factory cal freq */
1911 if (group == 5)
1912 group_index = 4;
1913 } else
1914 group_index = 0; /* 2.4 GHz, group 0 */
1915
1916 IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
1917 group_index);
1918 return group_index;
1919}
1920
/**
 * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
 * @priv: driver private data (for the EEPROM image)
 * @requested_power: desired power, in dBm
 * @setting_index: EEPROM factory-calibration group to interpolate within
 * @new_index: out: interpolated gain-table index
 *
 * Interpolate to get nominal (i.e. at factory calibration temperature) index
 * into radio/DSP gain settings table for requested power.
 *
 * Returns 0 on success, -EINVAL if the two bracketing samples share the
 * same power value (interpolation would divide by zero).
 */
static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
				       s8 requested_power,
				       s32 setting_index, s32 *new_index)
{
	const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	s32 index0, index1;
	/* EEPROM sample powers appear to be in half-dBm units -- TODO confirm */
	s32 power = 2 * requested_power;
	s32 i;
	const struct iwl3945_eeprom_txpower_sample *samples;
	s32 gains0, gains1;
	s32 res;
	s32 denominator;

	/* exact match against any of the group's 5 factory samples? */
	chnl_grp = &eeprom->groups[setting_index];
	samples = chnl_grp->samples;
	for (i = 0; i < 5; i++) {
		if (power == samples[i].power) {
			*new_index = samples[i].gain_index;
			return 0;
		}
	}

	/* pick the pair of samples bracketing the requested power
	 * (assumes samples are ordered by decreasing power -- TODO confirm) */
	if (power > samples[1].power) {
		index0 = 0;
		index1 = 1;
	} else if (power > samples[2].power) {
		index0 = 1;
		index1 = 2;
	} else if (power > samples[3].power) {
		index0 = 2;
		index1 = 3;
	} else {
		index0 = 3;
		index1 = 4;
	}

	/* linear interpolation in fixed point with 19 fractional bits;
	 * the (1 << 18) term rounds to nearest on the final shift down */
	denominator = (s32) samples[index1].power - (s32) samples[index0].power;
	if (denominator == 0)
		return -EINVAL;
	gains0 = (s32) samples[index0].gain_index * (1 << 19);
	gains1 = (s32) samples[index1].gain_index * (1 << 19);
	res = gains0 + (gains1 - gains0) *
	    ((s32) power - (s32) samples[index0].power) / denominator +
	    (1 << 18);
	*new_index = res >> 19;
	return 0;
}
1975
1976static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
1977{
1978 u32 i;
1979 s32 rate_index;
1980 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1981 const struct iwl3945_eeprom_txpower_group *group;
1982
1983 IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
1984
1985 for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
1986 s8 *clip_pwrs; /* table of power levels for each rate */
1987 s8 satur_pwr; /* saturation power for each chnl group */
1988 group = &eeprom->groups[i];
1989
1990 /* sanity check on factory saturation power value */
1991 if (group->saturation_power < 40) {
1992 IWL_WARN(priv, "Error: saturation power is %d, "
1993 "less than minimum expected 40\n",
1994 group->saturation_power);
1995 return;
1996 }
1997
1998 /*
1999 * Derive requested power levels for each rate, based on
2000 * hardware capabilities (saturation power for band).
2001 * Basic value is 3dB down from saturation, with further
2002 * power reductions for highest 3 data rates. These
2003 * backoffs provide headroom for high rate modulation
2004 * power peaks, without too much distortion (clipping).
2005 */
2006 /* we'll fill in this array with h/w max power levels */
2007 clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
2008
2009 /* divide factory saturation power by 2 to find -3dB level */
2010 satur_pwr = (s8) (group->saturation_power >> 1);
2011
2012 /* fill in channel group's nominal powers for each rate */
2013 for (rate_index = 0;
2014 rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
2015 switch (rate_index) {
2016 case IWL_RATE_36M_INDEX_TABLE:
2017 if (i == 0) /* B/G */
2018 *clip_pwrs = satur_pwr;
2019 else /* A */
2020 *clip_pwrs = satur_pwr - 5;
2021 break;
2022 case IWL_RATE_48M_INDEX_TABLE:
2023 if (i == 0)
2024 *clip_pwrs = satur_pwr - 7;
2025 else
2026 *clip_pwrs = satur_pwr - 10;
2027 break;
2028 case IWL_RATE_54M_INDEX_TABLE:
2029 if (i == 0)
2030 *clip_pwrs = satur_pwr - 9;
2031 else
2032 *clip_pwrs = satur_pwr - 12;
2033 break;
2034 default:
2035 *clip_pwrs = satur_pwr;
2036 break;
2037 }
2038 }
2039 }
2040}
2041
/**
 * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
 *
 * Second pass (during init) to set up priv->channel_info
 *
 * Set up Tx-power settings in our channel info database for each VALID
 * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
 * and current temperature.
 *
 * Since this is based on current temperature (at init time), these values may
 * not be valid for very long, but it gives us a starting/default point,
 * and allows us to active (i.e. using Tx) scan.
 *
 * This does *not* write values to NIC, just sets up our internal table.
 *
 * Returns 0 on success, or the error from
 * iwl3945_hw_reg_get_matched_power_index() on an invalid power index.
 */
int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch_info = NULL;
	struct iwl3945_channel_power_info *pwr_info;
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	int delta_index;
	u8 rate_index;
	u8 scan_tbl_index;
	const s8 *clip_pwrs; /* array of power levels for each rate */
	u8 gain, dsp_atten;
	s8 power;
	u8 pwr_index, base_pwr_index, a_band;
	u8 i;
	int temperature;

	/* save temperature reference,
	 *   so we can determine next time to calibrate */
	temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
	priv->last_temperature = temperature;

	iwl3945_hw_reg_init_channel_groups(priv);

	/* initialize Tx power info for each and every channel, 2.4 and 5.x */
	for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
	     i++, ch_info++) {
		a_band = iwl_legacy_is_channel_a_band(ch_info);
		if (!iwl_legacy_is_channel_valid(ch_info))
			continue;

		/* find this channel's channel group (*not* "band") index */
		ch_info->group_index =
			iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);

		/* Get this chnlgrp's rate->max/clip-powers table */
		clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;

		/* calculate power index *adjustment* value according to
		 *  diff between current temperature and factory temperature */
		delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
				eeprom->groups[ch_info->group_index].
				temperature);

		IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
				ch_info->channel, delta_index, temperature +
				IWL_TEMP_CONVERT);

		/* set tx power value for all OFDM rates */
		for (rate_index = 0; rate_index < IWL_OFDM_RATES;
		     rate_index++) {
			/* uninitialized_var() only silences a compiler
			 * warning; power_idx is set by the helper below on
			 * success, and on failure we return before use */
			s32 uninitialized_var(power_idx);
			int rc;

			/* use channel group's clip-power table,
			 *   but don't exceed channel's max power */
			s8 pwr = min(ch_info->max_power_avg,
				     clip_pwrs[rate_index]);

			pwr_info = &ch_info->power_info[rate_index];

			/* get base (i.e. at factory-measured temperature)
			 *    power table index for this rate's power */
			rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
							 ch_info->group_index,
							 &power_idx);
			if (rc) {
				IWL_ERR(priv, "Invalid power index\n");
				return rc;
			}
			pwr_info->base_power_index = (u8) power_idx;

			/* temperature compensate */
			power_idx += delta_index;

			/* stay within range of gain table */
			power_idx = iwl3945_hw_reg_fix_power_index(power_idx);

			/* fill 1 OFDM rate's iwl3945_channel_power_info struct */
			pwr_info->requested_power = pwr;
			pwr_info->power_table_index = (u8) power_idx;
			pwr_info->tpc.tx_gain =
			    power_gain_table[a_band][power_idx].tx_gain;
			pwr_info->tpc.dsp_atten =
			    power_gain_table[a_band][power_idx].dsp_atten;
		}

		/* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
		pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
		power = pwr_info->requested_power +
			IWL_CCK_FROM_OFDM_POWER_DIFF;
		pwr_index = pwr_info->power_table_index +
			IWL_CCK_FROM_OFDM_INDEX_DIFF;
		base_pwr_index = pwr_info->base_power_index +
			IWL_CCK_FROM_OFDM_INDEX_DIFF;

		/* stay within table range */
		pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
		gain = power_gain_table[a_band][pwr_index].tx_gain;
		dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;

		/* fill each CCK rate's iwl3945_channel_power_info structure
		 * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
		 * NOTE: CCK rates start at end of OFDM rates! */
		for (rate_index = 0;
		     rate_index < IWL_CCK_RATES; rate_index++) {
			pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
			pwr_info->requested_power = power;
			pwr_info->power_table_index = pwr_index;
			pwr_info->base_power_index = base_pwr_index;
			pwr_info->tpc.tx_gain = gain;
			pwr_info->tpc.dsp_atten = dsp_atten;
		}

		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
		for (scan_tbl_index = 0;
		     scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
			s32 actual_index = (scan_tbl_index == 0) ?
				IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
			iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
				actual_index, clip_pwrs, ch_info, a_band);
		}
	}

	return 0;
}
2181
2182int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
2183{
2184 int rc;
2185
2186 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
2187 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
2188 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2189 if (rc < 0)
2190 IWL_ERR(priv, "Can't stop Rx DMA.\n");
2191
2192 return 0;
2193}
2194
/**
 * iwl3945_hw_tx_queue_init - register a Tx queue's DMA buffer with the FH
 * @priv: driver private data
 * @txq: Tx queue to set up
 *
 * Publishes the queue's DMA base address in the shared area and programs
 * the FH39 TCSR channel configuration for this queue.
 *
 * Always returns 0.
 */
int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	struct iwl3945_shared *shared_data = priv->_3945.shared_virt;

	/* tell the device where this queue's descriptor ring lives */
	shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);

	/* zero the CBCC control/base registers for this channel
	 * (presumably the circular-buffer byte-count area -- TODO confirm) */
	iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
	iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);

	/* enable the DMA channel in host-driven TFD mode */
	iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
		FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
		FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
		FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
		FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
		FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);

	/* fake read to flush all prev. writes */
	iwl_read32(priv, FH39_TSSR_CBB_BASE);

	return 0;
}
2218
2219/*
2220 * HCMD utils
2221 */
2222static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
2223{
2224 switch (cmd_id) {
2225 case REPLY_RXON:
2226 return sizeof(struct iwl3945_rxon_cmd);
2227 case POWER_TABLE_CMD:
2228 return sizeof(struct iwl3945_powertable_cmd);
2229 default:
2230 return len;
2231 }
2232}
2233
2234
2235static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
2236 u8 *data)
2237{
2238 struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
2239 addsta->mode = cmd->mode;
2240 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
2241 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
2242 addsta->station_flags = cmd->station_flags;
2243 addsta->station_flags_msk = cmd->station_flags_msk;
2244 addsta->tid_disable_tx = cpu_to_le16(0);
2245 addsta->rate_n_flags = cmd->rate_n_flags;
2246 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
2247 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2248 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2249
2250 return (u16)sizeof(struct iwl3945_addsta_cmd);
2251}
2252
2253static int iwl3945_add_bssid_station(struct iwl_priv *priv,
2254 const u8 *addr, u8 *sta_id_r)
2255{
2256 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2257 int ret;
2258 u8 sta_id;
2259 unsigned long flags;
2260
2261 if (sta_id_r)
2262 *sta_id_r = IWL_INVALID_STATION;
2263
2264 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
2265 if (ret) {
2266 IWL_ERR(priv, "Unable to add station %pM\n", addr);
2267 return ret;
2268 }
2269
2270 if (sta_id_r)
2271 *sta_id_r = sta_id;
2272
2273 spin_lock_irqsave(&priv->sta_lock, flags);
2274 priv->stations[sta_id].used |= IWL_STA_LOCAL;
2275 spin_unlock_irqrestore(&priv->sta_lock, flags);
2276
2277 return 0;
2278}
2279static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2280 struct ieee80211_vif *vif, bool add)
2281{
2282 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2283 int ret;
2284
2285 if (add) {
2286 ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
2287 &vif_priv->ibss_bssid_sta_id);
2288 if (ret)
2289 return ret;
2290
2291 iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
2292 (priv->band == IEEE80211_BAND_5GHZ) ?
2293 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
2294 iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
2295
2296 return 0;
2297 }
2298
2299 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
2300 vif->bss_conf.bssid);
2301}
2302
/**
 * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
 *
 * Builds the REPLY_RATE_SCALE command that tells the uCode, for every
 * rate, how many times to try it and which rate to fall back to next,
 * then sends the table twice: once for control frames (table_id 0) and
 * once for data frames (table_id 1).
 *
 * Returns 0 on success or the error from iwl_legacy_send_cmd_pdu().
 */
int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
{
	int rc, i, index, prev_index;
	struct iwl3945_rate_scaling_cmd rate_cmd = {
		.reserved = {0, 0, 0},
	};
	struct iwl3945_rate_scaling_info *table = rate_cmd.table;

	/* Default chain: each rate falls back to the next lower IEEE rate. */
	for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
		index = iwl3945_rates[i].table_rs_index;

		table[index].rate_n_flags =
			iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
		table[index].try_cnt = priv->retry_rate;
		prev_index = iwl3945_get_prev_ieee_rate(i);
		table[index].next_rate_index =
				iwl3945_rates[prev_index].table_rs_index;
	}

	/* Band-specific fixups: keep fallback within rates the band allows. */
	switch (priv->band) {
	case IEEE80211_BAND_5GHZ:
		IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
		/* If one of the following CCK rates is used,
		 * have it fall back to the 6M OFDM rate */
		for (i = IWL_RATE_1M_INDEX_TABLE;
		     i <= IWL_RATE_11M_INDEX_TABLE; i++)
			table[i].next_rate_index =
			  iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;

		/* Don't fall back to CCK rates */
		table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
						IWL_RATE_9M_INDEX_TABLE;

		/* Don't drop out of OFDM rates */
		table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
			iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
		break;

	case IEEE80211_BAND_2GHZ:
		IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
		/* If an OFDM rate is used, have it fall back to the
		 * 1M CCK rates */

		/* Only reroute OFDM fallback when the peer advertises no
		 * OFDM support and we are associated on the BSS context. */
		if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
		    iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {

			index = IWL_FIRST_CCK_RATE;
			for (i = IWL_RATE_6M_INDEX_TABLE;
			     i <= IWL_RATE_54M_INDEX_TABLE; i++)
				table[i].next_rate_index =
					iwl3945_rates[index].table_rs_index;

			index = IWL_RATE_11M_INDEX_TABLE;
			/* CCK shouldn't fall back to OFDM... */
			table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
		}
		break;

	default:
		WARN_ON(1);
		break;
	}

	/* Update the rate scaling for control frame Tx */
	rate_cmd.table_id = 0;
	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
			      &rate_cmd);
	if (rc)
		return rc;

	/* Update the rate scaling for data frame Tx */
	rate_cmd.table_id = 1;
	return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
				&rate_cmd);
}
2381
2382/* Called when initializing driver */
2383int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2384{
2385 memset((void *)&priv->hw_params, 0,
2386 sizeof(struct iwl_hw_params));
2387
2388 priv->_3945.shared_virt =
2389 dma_alloc_coherent(&priv->pci_dev->dev,
2390 sizeof(struct iwl3945_shared),
2391 &priv->_3945.shared_phys, GFP_KERNEL);
2392 if (!priv->_3945.shared_virt) {
2393 IWL_ERR(priv, "failed to allocate pci memory\n");
2394 return -ENOMEM;
2395 }
2396
2397 /* Assign number of Usable TX queues */
2398 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
2399
2400 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2401 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
2402 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2403 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2404 priv->hw_params.max_stations = IWL3945_STATION_COUNT;
2405 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
2406
2407 priv->sta_key_max_num = STA_KEY_MAX_NUM;
2408
2409 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2410 priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
2411 priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
2412
2413 return 0;
2414}
2415
2416unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
2417 struct iwl3945_frame *frame, u8 rate)
2418{
2419 struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
2420 unsigned int frame_size;
2421
2422 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
2423 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2424
2425 tx_beacon_cmd->tx.sta_id =
2426 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2427 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2428
2429 frame_size = iwl3945_fill_beacon_frame(priv,
2430 tx_beacon_cmd->frame,
2431 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2432
2433 BUG_ON(frame_size > MAX_MPDU_SIZE);
2434 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
2435
2436 tx_beacon_cmd->tx.rate = rate;
2437 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
2438 TX_CMD_FLG_TSF_MSK);
2439
2440 /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
2441 tx_beacon_cmd->tx.supp_rates[0] =
2442 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2443
2444 tx_beacon_cmd->tx.supp_rates[1] =
2445 (IWL_CCK_BASIC_RATES_MASK & 0xF);
2446
2447 return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
2448}
2449
2450void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
2451{
2452 priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
2453 priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
2454}
2455
/* Register the periodic thermal/txpower calibration worker; it is
 * scheduled elsewhere, this only initializes the delayed work item. */
void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
{
	INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
			  iwl3945_bg_reg_txpower_periodic);
}
2461
/* Cancel the pending thermal/txpower calibration work, if scheduled. */
void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_delayed_work(&priv->_3945.thermal_periodic);
}
2466
2467/* check contents of special bootstrap uCode SRAM */
2468static int iwl3945_verify_bsm(struct iwl_priv *priv)
2469 {
2470 __le32 *image = priv->ucode_boot.v_addr;
2471 u32 len = priv->ucode_boot.len;
2472 u32 reg;
2473 u32 val;
2474
2475 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
2476
2477 /* verify BSM SRAM contents */
2478 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
2479 for (reg = BSM_SRAM_LOWER_BOUND;
2480 reg < BSM_SRAM_LOWER_BOUND + len;
2481 reg += sizeof(u32), image++) {
2482 val = iwl_legacy_read_prph(priv, reg);
2483 if (val != le32_to_cpu(*image)) {
2484 IWL_ERR(priv, "BSM uCode verification failed at "
2485 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
2486 BSM_SRAM_LOWER_BOUND,
2487 reg - BSM_SRAM_LOWER_BOUND, len,
2488 val, le32_to_cpu(*image));
2489 return -EIO;
2490 }
2491 }
2492
2493 IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
2494
2495 return 0;
2496}
2497
2498
2499/******************************************************************************
2500 *
2501 * EEPROM related functions
2502 *
2503 ******************************************************************************/
2504
/*
 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
 * embedded controller) as EEPROM reader; each read is a series of pulses
 * to/from the EEPROM chip, not a single event, so even reads could conflict
 * if they weren't arbitrated by some ownership mechanism.  Here, the driver
 * simply claims ownership, which should be safe when this function is called
 * (i.e. before loading uCode!).
 *
 * Always returns 0 (claiming ownership cannot fail).
 */
static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
{
	_iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
	return 0;
}
2518
2519
/*
 * Counterpart of iwl3945_eeprom_acquire_semaphore(): intentionally a
 * no-op on 3945, kept so the shared eeprom_ops interface is satisfied.
 * (The redundant bare "return;" at the end of this void function was
 * removed.)
 */
static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
{
}
2524
2525 /**
2526 * iwl3945_load_bsm - Load bootstrap instructions
2527 *
2528 * BSM operation:
2529 *
2530 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
2531 * in special SRAM that does not power down during RFKILL. When powering back
2532 * up after power-saving sleeps (or during initial uCode load), the BSM loads
2533 * the bootstrap program into the on-board processor, and starts it.
2534 *
2535 * The bootstrap program loads (via DMA) instructions and data for a new
2536 * program from host DRAM locations indicated by the host driver in the
2537 * BSM_DRAM_* registers. Once the new program is loaded, it starts
2538 * automatically.
2539 *
2540 * When initializing the NIC, the host driver points the BSM to the
2541 * "initialize" uCode image. This uCode sets up some internal data, then
2542 * notifies host via "initialize alive" that it is complete.
2543 *
2544 * The host then replaces the BSM_DRAM_* pointer values to point to the
2545 * normal runtime uCode instructions and a backup uCode data cache buffer
2546 * (filled initially with starting data values for the on-board processor),
2547 * then triggers the "initialize" uCode to load and launch the runtime uCode,
2548 * which begins normal operation.
2549 *
2550 * When doing a power-save shutdown, runtime uCode saves data SRAM into
2551 * the backup data cache in DRAM before SRAM is powered down.
2552 *
2553 * When powering back up, the BSM loads the bootstrap program. This reloads
2554 * the runtime uCode instructions and the backup data cache into SRAM,
2555 * and re-launches the runtime uCode from where it left off.
2556 */
2557static int iwl3945_load_bsm(struct iwl_priv *priv)
2558{
2559 __le32 *image = priv->ucode_boot.v_addr;
2560 u32 len = priv->ucode_boot.len;
2561 dma_addr_t pinst;
2562 dma_addr_t pdata;
2563 u32 inst_len;
2564 u32 data_len;
2565 int rc;
2566 int i;
2567 u32 done;
2568 u32 reg_offset;
2569
2570 IWL_DEBUG_INFO(priv, "Begin load bsm\n");
2571
2572 /* make sure bootstrap program is no larger than BSM's SRAM size */
2573 if (len > IWL39_MAX_BSM_SIZE)
2574 return -EINVAL;
2575
2576 /* Tell bootstrap uCode where to find the "Initialize" uCode
2577 * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
2578 * NOTE: iwl3945_initialize_alive_start() will replace these values,
2579 * after the "initialize" uCode has run, to point to
2580 * runtime/protocol instructions and backup data cache. */
2581 pinst = priv->ucode_init.p_addr;
2582 pdata = priv->ucode_init_data.p_addr;
2583 inst_len = priv->ucode_init.len;
2584 data_len = priv->ucode_init_data.len;
2585
2586 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2587 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2588 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
2589 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
2590
2591 /* Fill BSM memory with bootstrap instructions */
2592 for (reg_offset = BSM_SRAM_LOWER_BOUND;
2593 reg_offset < BSM_SRAM_LOWER_BOUND + len;
2594 reg_offset += sizeof(u32), image++)
2595 _iwl_legacy_write_prph(priv, reg_offset,
2596 le32_to_cpu(*image));
2597
2598 rc = iwl3945_verify_bsm(priv);
2599 if (rc)
2600 return rc;
2601
2602 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
2603 iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
2604 iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
2605 IWL39_RTC_INST_LOWER_BOUND);
2606 iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
2607
2608 /* Load bootstrap code into instruction SRAM now,
2609 * to prepare to load "initialize" uCode */
2610 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2611 BSM_WR_CTRL_REG_BIT_START);
2612
2613 /* Wait for load of bootstrap uCode to finish */
2614 for (i = 0; i < 100; i++) {
2615 done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
2616 if (!(done & BSM_WR_CTRL_REG_BIT_START))
2617 break;
2618 udelay(10);
2619 }
2620 if (i < 100)
2621 IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
2622 else {
2623 IWL_ERR(priv, "BSM write did not complete!\n");
2624 return -EIO;
2625 }
2626
2627 /* Enable future boot loads whenever power management unit triggers it
2628 * (e.g. when powering back up after power-save shutdown) */
2629 iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
2630 BSM_WR_CTRL_REG_BIT_START_EN);
2631
2632 return 0;
2633}
2634
/* Host-command hooks: 3945-specific RXON association and commit. */
static struct iwl_hcmd_ops iwl3945_hcmd = {
	.rxon_assoc = iwl3945_send_rxon_assoc,
	.commit_rxon = iwl3945_commit_rxon,
};
2639
/*
 * Library callbacks wired into the shared iwlegacy core: TX queue/TFD
 * management, uCode load via the bootstrap state machine, error-log
 * dump, APM/NIC setup, EEPROM access and debugfs statistics readers.
 */
static struct iwl_lib_ops iwl3945_lib = {
	.txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = iwl3945_hw_txq_free_tfd,
	.txq_init = iwl3945_hw_tx_queue_init,
	.load_ucode = iwl3945_load_bsm,
	.dump_nic_error_log = iwl3945_dump_nic_error_log,
	.apm_ops = {
		.init = iwl3945_apm_init,
		.config = iwl3945_nic_config,
	},
	.eeprom_ops = {
		.regulatory_bands = {
			EEPROM_REGULATORY_BAND_1_CHANNELS,
			EEPROM_REGULATORY_BAND_2_CHANNELS,
			EEPROM_REGULATORY_BAND_3_CHANNELS,
			EEPROM_REGULATORY_BAND_4_CHANNELS,
			EEPROM_REGULATORY_BAND_5_CHANNELS,
			/* no HT40 channel tables on 3945 */
			EEPROM_REGULATORY_BAND_NO_HT40,
			EEPROM_REGULATORY_BAND_NO_HT40,
		},
		.acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
		.release_semaphore = iwl3945_eeprom_release_semaphore,
	},
	.send_tx_power	= iwl3945_send_tx_power,
	.is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,

	.debugfs_ops = {
		.rx_stats_read = iwl3945_ucode_rx_stats_read,
		.tx_stats_read = iwl3945_ucode_tx_stats_read,
		.general_stats_read = iwl3945_ucode_general_stats_read,
	},
};
2672
/* mac80211 association/AP/IBSS glue implemented by this hardware file. */
static const struct iwl_legacy_ops iwl3945_legacy_ops = {
	.post_associate = iwl3945_post_associate,
	.config_ap = iwl3945_config_ap,
	.manage_ibss_station = iwl3945_manage_ibss_station,
};
2678
/* Host-command sizing/building helpers plus scan request handling. */
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
	.get_hcmd_size = iwl3945_get_hcmd_size,
	.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
	.request_scan = iwl3945_request_scan,
	.post_scan = iwl3945_post_scan,
};
2685
/* Aggregate ops table handed to the shared core via the cfg below. */
static const struct iwl_ops iwl3945_ops = {
	.lib = &iwl3945_lib,
	.hcmd = &iwl3945_hcmd,
	.utils = &iwl3945_hcmd_utils,
	.led = &iwl3945_led_ops,
	.legacy = &iwl3945_legacy_ops,
	.ieee80211_ops = &iwl3945_hw_ops,
};
2694
/* Parameters common to every 3945 SKU (shared by both cfgs below). */
static struct iwl_base_params iwl3945_base_params = {
	.eeprom_size = IWL3945_EEPROM_IMG_SIZE,
	.num_of_queues = IWL39_NUM_QUEUES,
	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
	.set_l0s = false,
	/* 3945 boots its uCode through the bootstrap state machine */
	.use_bsm = true,
	.led_compensation = 64,
	.wd_timeout = IWL_DEF_WD_TIMEOUT,
};
2704
/* 3945BG: 2.4 GHz 802.11b/g-only SKU. */
static struct iwl_cfg iwl3945_bg_cfg = {
	.name = "3945BG",
	.fw_name_pre = IWL3945_FW_PRE,
	.ucode_api_max = IWL3945_UCODE_API_MAX,
	.ucode_api_min = IWL3945_UCODE_API_MIN,
	.sku = IWL_SKU_G,
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &iwl3945_ops,
	.mod_params = &iwl3945_mod_params,
	.base_params = &iwl3945_base_params,
	.led_mode = IWL_LED_BLINK,
};

/* 3945ABG: as above, plus 5 GHz 802.11a support (IWL_SKU_A). */
static struct iwl_cfg iwl3945_abg_cfg = {
	.name = "3945ABG",
	.fw_name_pre = IWL3945_FW_PRE,
	.ucode_api_max = IWL3945_UCODE_API_MAX,
	.ucode_api_min = IWL3945_UCODE_API_MIN,
	.sku = IWL_SKU_A|IWL_SKU_G,
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &iwl3945_ops,
	.mod_params = &iwl3945_mod_params,
	.base_params = &iwl3945_base_params,
	.led_mode = IWL_LED_BLINK,
};
2730
/*
 * PCI IDs handled by this driver.  The entries with explicit subsystem
 * IDs (BG-only boards) are listed before the PCI_ANY_ID catch-alls so
 * they match first; the remaining 0x4222/0x4227 boards fall through to
 * the ABG configuration.
 */
DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
	{IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
	{IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
	{0}
};

MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
deleted file mode 100644
index b118b59b71de..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.h
+++ /dev/null
@@ -1,308 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-3945.h) for driver implementation definitions.
28 * Please use iwl-3945-commands.h for uCode API definitions.
29 * Please use iwl-3945-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_3945_h__
33#define __iwl_3945_h__
34
35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h>
37#include <net/ieee80211_radiotap.h>
38
39/* Hardware specific file defines the PCI IDs table for that hardware module */
40extern const struct pci_device_id iwl3945_hw_card_ids[];
41
42#include "iwl-csr.h"
43#include "iwl-prph.h"
44#include "iwl-fh.h"
45#include "iwl-3945-hw.h"
46#include "iwl-debug.h"
47#include "iwl-power.h"
48#include "iwl-dev.h"
49#include "iwl-led.h"
50
51/* Highest firmware API version supported */
52#define IWL3945_UCODE_API_MAX 2
53
54/* Lowest firmware API version supported */
55#define IWL3945_UCODE_API_MIN 1
56
57#define IWL3945_FW_PRE "iwlwifi-3945-"
58#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
59#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
60
61/* Default noise level to report when noise measurement is not available.
62 * This may be because we're:
63 * 1) Not associated (4965, no beacon statistics being sent to driver)
64 * 2) Scanning (noise measurement does not apply to associated channel)
65 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
66 * Use default noise value of -127 ... this is below the range of measurable
67 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
68 * Also, -127 works better than 0 when averaging frames with/without
69 * noise info (e.g. averaging might be done in app); measured dBm values are
70 * always negative ... using a negative value as the default keeps all
71 * averages within an s8's (used in some apps) range of negative values. */
72#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
73
74/* Module parameters accessible from iwl-*.c */
75extern struct iwl_mod_params iwl3945_mod_params;
76
77struct iwl3945_rate_scale_data {
78 u64 data;
79 s32 success_counter;
80 s32 success_ratio;
81 s32 counter;
82 s32 average_tpt;
83 unsigned long stamp;
84};
85
86struct iwl3945_rs_sta {
87 spinlock_t lock;
88 struct iwl_priv *priv;
89 s32 *expected_tpt;
90 unsigned long last_partial_flush;
91 unsigned long last_flush;
92 u32 flush_time;
93 u32 last_tx_packets;
94 u32 tx_packets;
95 u8 tgg;
96 u8 flush_pending;
97 u8 start_rate;
98 struct timer_list rate_scale_flush;
99 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
100#ifdef CONFIG_MAC80211_DEBUGFS
101 struct dentry *rs_sta_dbgfs_stats_table_file;
102#endif
103
104 /* used to be in sta_info */
105 int last_txrate_idx;
106};
107
108
109/*
110 * The common struct MUST be first because it is shared between
111 * 3945 and 4965!
112 */
113struct iwl3945_sta_priv {
114 struct iwl_station_priv_common common;
115 struct iwl3945_rs_sta rs_sta;
116};
117
118enum iwl3945_antenna {
119 IWL_ANTENNA_DIVERSITY,
120 IWL_ANTENNA_MAIN,
121 IWL_ANTENNA_AUX
122};
123
124/*
125 * RTS threshold here is total size [2347] minus 4 FCS bytes
126 * Per spec:
127 * a value of 0 means RTS on all data/management packets
128 * a value > max MSDU size means no RTS
129 * else RTS for data/management frames where MPDU is larger
130 * than RTS value.
131 */
132#define DEFAULT_RTS_THRESHOLD 2347U
133#define MIN_RTS_THRESHOLD 0U
134#define MAX_RTS_THRESHOLD 2347U
135#define MAX_MSDU_SIZE 2304U
136#define MAX_MPDU_SIZE 2346U
137#define DEFAULT_BEACON_INTERVAL 100U
138#define DEFAULT_SHORT_RETRY_LIMIT 7U
139#define DEFAULT_LONG_RETRY_LIMIT 4U
140
141#define IWL_TX_FIFO_AC0 0
142#define IWL_TX_FIFO_AC1 1
143#define IWL_TX_FIFO_AC2 2
144#define IWL_TX_FIFO_AC3 3
145#define IWL_TX_FIFO_HCCA_1 5
146#define IWL_TX_FIFO_HCCA_2 6
147#define IWL_TX_FIFO_NONE 7
148
149#define IEEE80211_DATA_LEN 2304
150#define IEEE80211_4ADDR_LEN 30
151#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
152#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
153
154struct iwl3945_frame {
155 union {
156 struct ieee80211_hdr frame;
157 struct iwl3945_tx_beacon_cmd beacon;
158 u8 raw[IEEE80211_FRAME_LEN];
159 u8 cmd[360];
160 } u;
161 struct list_head list;
162};
163
164#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
165#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
166#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
167
168#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
169#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
170#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
171
172#define IWL_SUPPORTED_RATES_IE_LEN 8
173
174#define SCAN_INTERVAL 100
175
176#define MAX_TID_COUNT 9
177
178#define IWL_INVALID_RATE 0xFF
179#define IWL_INVALID_VALUE -1
180
181#define STA_PS_STATUS_WAKE 0
182#define STA_PS_STATUS_SLEEP 1
183
184struct iwl3945_ibss_seq {
185 u8 mac[ETH_ALEN];
186 u16 seq_num;
187 u16 frag_num;
188 unsigned long packet_time;
189 struct list_head list;
190};
191
192#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
193 x->u.rx_frame.stats.payload + \
194 x->u.rx_frame.stats.phy_count))
195#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
196 IWL_RX_HDR(x)->payload + \
197 le16_to_cpu(IWL_RX_HDR(x)->len)))
198#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
199#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
200
201
202/******************************************************************************
203 *
204 * Functions implemented in iwl3945-base.c which are forward declared here
205 * for use by iwl-*.c
206 *
207 *****************************************************************************/
208extern int iwl3945_calc_db_from_ratio(int sig_ratio);
209extern void iwl3945_rx_replenish(void *data);
210extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
211extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
212 struct ieee80211_hdr *hdr, int left);
213extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
214 char **buf, bool display);
215extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
216
217/******************************************************************************
218 *
219 * Functions implemented in iwl-[34]*.c which are forward declared here
220 * for use by iwl3945-base.c
221 *
222 * NOTE: The implementation of these functions are hardware specific
223 * which is why they are in the hardware specific files (vs. iwl-base.c)
224 *
225 * Naming convention --
226 * iwl3945_ <-- Its part of iwlwifi (should be changed to iwl3945_)
227 * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
228 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
229 * iwl3945_bg_ <-- Called from work queue context
230 * iwl3945_mac_ <-- mac80211 callback
231 *
232 ****************************************************************************/
233extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
234extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
235extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
236extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
237extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
238extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
239extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
240extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
241extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
242extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
243extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
244 struct iwl_tx_queue *txq,
245 dma_addr_t addr, u16 len,
246 u8 reset, u8 pad);
247extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
248 struct iwl_tx_queue *txq);
249extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
250extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
251 struct iwl_tx_queue *txq);
252extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
253 struct iwl3945_frame *frame, u8 rate);
254void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
255 struct iwl_device_cmd *cmd,
256 struct ieee80211_tx_info *info,
257 struct ieee80211_hdr *hdr,
258 int sta_id, int tx_id);
259extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
260extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
261extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
262 struct iwl_rx_mem_buffer *rxb);
263void iwl3945_reply_statistics(struct iwl_priv *priv,
264 struct iwl_rx_mem_buffer *rxb);
265extern void iwl3945_disable_events(struct iwl_priv *priv);
266extern int iwl4965_get_temperature(const struct iwl_priv *priv);
267extern void iwl3945_post_associate(struct iwl_priv *priv);
268extern void iwl3945_config_ap(struct iwl_priv *priv);
269
270extern int iwl3945_commit_rxon(struct iwl_priv *priv,
271 struct iwl_rxon_context *ctx);
272
273/**
274 * iwl3945_hw_find_station - Find station id for a given BSSID
275 * @bssid: MAC address of station ID to find
276 *
277 * NOTE: This should not be hardware specific but the code has
278 * not yet been merged into a single common layer for managing the
279 * station tables.
280 */
281extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
282
283extern struct ieee80211_ops iwl3945_hw_ops;
284
285/*
286 * Forward declare iwl-3945.c functions for iwl3945-base.c
287 */
288extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
289extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
290extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
291extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
292
293extern const struct iwl_channel_info *iwl3945_get_channel_info(
294 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
295
296extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
297
298/* scanning */
299int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
300void iwl3945_post_scan(struct iwl_priv *priv);
301
302/* rates */
303extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
304
305/* Requires full declaration of iwl_priv before including */
306#include "iwl-io.h"
307
308#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
deleted file mode 100644
index f46c80e6e005..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_4965_calib_h__
63#define __iwl_4965_calib_h__
64
65#include "iwl-dev.h"
66#include "iwl-core.h"
67#include "iwl-commands.h"
68
69void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
70void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
71void iwl4965_init_sensitivity(struct iwl_priv *priv);
72void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
73void iwl4965_calib_free_results(struct iwl_priv *priv);
74
75#endif /* __iwl_4965_calib_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
deleted file mode 100644
index 1c93665766e4..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
+++ /dev/null
@@ -1,774 +0,0 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "iwl-4965.h"
29#include "iwl-4965-debugfs.h"
30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
33static const char *fmt_header =
34 "%-32s current cumulative delta max\n";
35
36static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
37{
38 int p = 0;
39 u32 flag;
40
41 flag = le32_to_cpu(priv->_4965.statistics.flag);
42
43 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
44 if (flag & UCODE_STATISTICS_CLEAR_MSK)
45 p += scnprintf(buf + p, bufsz - p,
46 "\tStatistics have been cleared\n");
47 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
48 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
49 ? "2.4 GHz" : "5.2 GHz");
50 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
51 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
52 ? "enabled" : "disabled");
53
54 return p;
55}
56
57ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
58 size_t count, loff_t *ppos)
59{
60 struct iwl_priv *priv = file->private_data;
61 int pos = 0;
62 char *buf;
63 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
64 sizeof(struct statistics_rx_non_phy) * 40 +
65 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
66 ssize_t ret;
67 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
68 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
69 struct statistics_rx_non_phy *general, *accum_general;
70 struct statistics_rx_non_phy *delta_general, *max_general;
71 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
72
73 if (!iwl_legacy_is_alive(priv))
74 return -EAGAIN;
75
76 buf = kzalloc(bufsz, GFP_KERNEL);
77 if (!buf) {
78 IWL_ERR(priv, "Can not allocate Buffer\n");
79 return -ENOMEM;
80 }
81
82 /*
83 * the statistic information display here is based on
84 * the last statistics notification from uCode
85 * might not reflect the current uCode activity
86 */
87 ofdm = &priv->_4965.statistics.rx.ofdm;
88 cck = &priv->_4965.statistics.rx.cck;
89 general = &priv->_4965.statistics.rx.general;
90 ht = &priv->_4965.statistics.rx.ofdm_ht;
91 accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
92 accum_cck = &priv->_4965.accum_statistics.rx.cck;
93 accum_general = &priv->_4965.accum_statistics.rx.general;
94 accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
95 delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
96 delta_cck = &priv->_4965.delta_statistics.rx.cck;
97 delta_general = &priv->_4965.delta_statistics.rx.general;
98 delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
99 max_ofdm = &priv->_4965.max_delta.rx.ofdm;
100 max_cck = &priv->_4965.max_delta.rx.cck;
101 max_general = &priv->_4965.max_delta.rx.general;
102 max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
103
104 pos += iwl4965_statistics_flag(priv, buf, bufsz);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 fmt_header, "Statistics_Rx - OFDM:");
107 pos += scnprintf(buf + pos, bufsz - pos,
108 fmt_table, "ina_cnt:",
109 le32_to_cpu(ofdm->ina_cnt),
110 accum_ofdm->ina_cnt,
111 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
112 pos += scnprintf(buf + pos, bufsz - pos,
113 fmt_table, "fina_cnt:",
114 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
115 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
116 pos += scnprintf(buf + pos, bufsz - pos,
117 fmt_table, "plcp_err:",
118 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
119 delta_ofdm->plcp_err, max_ofdm->plcp_err);
120 pos += scnprintf(buf + pos, bufsz - pos,
121 fmt_table, "crc32_err:",
122 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
123 delta_ofdm->crc32_err, max_ofdm->crc32_err);
124 pos += scnprintf(buf + pos, bufsz - pos,
125 fmt_table, "overrun_err:",
126 le32_to_cpu(ofdm->overrun_err),
127 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
128 max_ofdm->overrun_err);
129 pos += scnprintf(buf + pos, bufsz - pos,
130 fmt_table, "early_overrun_err:",
131 le32_to_cpu(ofdm->early_overrun_err),
132 accum_ofdm->early_overrun_err,
133 delta_ofdm->early_overrun_err,
134 max_ofdm->early_overrun_err);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 fmt_table, "crc32_good:",
137 le32_to_cpu(ofdm->crc32_good),
138 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
139 max_ofdm->crc32_good);
140 pos += scnprintf(buf + pos, bufsz - pos,
141 fmt_table, "false_alarm_cnt:",
142 le32_to_cpu(ofdm->false_alarm_cnt),
143 accum_ofdm->false_alarm_cnt,
144 delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos += scnprintf(buf + pos, bufsz - pos,
147 fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos += scnprintf(buf + pos, bufsz - pos,
153 fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout),
155 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
156 max_ofdm->sfd_timeout);
157 pos += scnprintf(buf + pos, bufsz - pos,
158 fmt_table, "fina_timeout:",
159 le32_to_cpu(ofdm->fina_timeout),
160 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 fmt_table, "unresponded_rts:",
164 le32_to_cpu(ofdm->unresponded_rts),
165 accum_ofdm->unresponded_rts,
166 delta_ofdm->unresponded_rts,
167 max_ofdm->unresponded_rts);
168 pos += scnprintf(buf + pos, bufsz - pos,
169 fmt_table, "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos += scnprintf(buf + pos, bufsz - pos,
175 fmt_table, "sent_ack_cnt:",
176 le32_to_cpu(ofdm->sent_ack_cnt),
177 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
178 max_ofdm->sent_ack_cnt);
179 pos += scnprintf(buf + pos, bufsz - pos,
180 fmt_table, "sent_cts_cnt:",
181 le32_to_cpu(ofdm->sent_cts_cnt),
182 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
183 max_ofdm->sent_cts_cnt);
184 pos += scnprintf(buf + pos, bufsz - pos,
185 fmt_table, "sent_ba_rsp_cnt:",
186 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
187 accum_ofdm->sent_ba_rsp_cnt,
188 delta_ofdm->sent_ba_rsp_cnt,
189 max_ofdm->sent_ba_rsp_cnt);
190 pos += scnprintf(buf + pos, bufsz - pos,
191 fmt_table, "dsp_self_kill:",
192 le32_to_cpu(ofdm->dsp_self_kill),
193 accum_ofdm->dsp_self_kill,
194 delta_ofdm->dsp_self_kill,
195 max_ofdm->dsp_self_kill);
196 pos += scnprintf(buf + pos, bufsz - pos,
197 fmt_table, "mh_format_err:",
198 le32_to_cpu(ofdm->mh_format_err),
199 accum_ofdm->mh_format_err,
200 delta_ofdm->mh_format_err,
201 max_ofdm->mh_format_err);
202 pos += scnprintf(buf + pos, bufsz - pos,
203 fmt_table, "re_acq_main_rssi_sum:",
204 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
205 accum_ofdm->re_acq_main_rssi_sum,
206 delta_ofdm->re_acq_main_rssi_sum,
207 max_ofdm->re_acq_main_rssi_sum);
208
209 pos += scnprintf(buf + pos, bufsz - pos,
210 fmt_header, "Statistics_Rx - CCK:");
211 pos += scnprintf(buf + pos, bufsz - pos,
212 fmt_table, "ina_cnt:",
213 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
214 delta_cck->ina_cnt, max_cck->ina_cnt);
215 pos += scnprintf(buf + pos, bufsz - pos,
216 fmt_table, "fina_cnt:",
217 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
218 delta_cck->fina_cnt, max_cck->fina_cnt);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 fmt_table, "plcp_err:",
221 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
222 delta_cck->plcp_err, max_cck->plcp_err);
223 pos += scnprintf(buf + pos, bufsz - pos,
224 fmt_table, "crc32_err:",
225 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
226 delta_cck->crc32_err, max_cck->crc32_err);
227 pos += scnprintf(buf + pos, bufsz - pos,
228 fmt_table, "overrun_err:",
229 le32_to_cpu(cck->overrun_err),
230 accum_cck->overrun_err, delta_cck->overrun_err,
231 max_cck->overrun_err);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 fmt_table, "early_overrun_err:",
234 le32_to_cpu(cck->early_overrun_err),
235 accum_cck->early_overrun_err,
236 delta_cck->early_overrun_err,
237 max_cck->early_overrun_err);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 fmt_table, "crc32_good:",
240 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
241 delta_cck->crc32_good, max_cck->crc32_good);
242 pos += scnprintf(buf + pos, bufsz - pos,
243 fmt_table, "false_alarm_cnt:",
244 le32_to_cpu(cck->false_alarm_cnt),
245 accum_cck->false_alarm_cnt,
246 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
247 pos += scnprintf(buf + pos, bufsz - pos,
248 fmt_table, "fina_sync_err_cnt:",
249 le32_to_cpu(cck->fina_sync_err_cnt),
250 accum_cck->fina_sync_err_cnt,
251 delta_cck->fina_sync_err_cnt,
252 max_cck->fina_sync_err_cnt);
253 pos += scnprintf(buf + pos, bufsz - pos,
254 fmt_table, "sfd_timeout:",
255 le32_to_cpu(cck->sfd_timeout),
256 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
257 max_cck->sfd_timeout);
258 pos += scnprintf(buf + pos, bufsz - pos,
259 fmt_table, "fina_timeout:",
260 le32_to_cpu(cck->fina_timeout),
261 accum_cck->fina_timeout, delta_cck->fina_timeout,
262 max_cck->fina_timeout);
263 pos += scnprintf(buf + pos, bufsz - pos,
264 fmt_table, "unresponded_rts:",
265 le32_to_cpu(cck->unresponded_rts),
266 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
267 max_cck->unresponded_rts);
268 pos += scnprintf(buf + pos, bufsz - pos,
269 fmt_table, "rxe_frame_lmt_ovrun:",
270 le32_to_cpu(cck->rxe_frame_limit_overrun),
271 accum_cck->rxe_frame_limit_overrun,
272 delta_cck->rxe_frame_limit_overrun,
273 max_cck->rxe_frame_limit_overrun);
274 pos += scnprintf(buf + pos, bufsz - pos,
275 fmt_table, "sent_ack_cnt:",
276 le32_to_cpu(cck->sent_ack_cnt),
277 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
278 max_cck->sent_ack_cnt);
279 pos += scnprintf(buf + pos, bufsz - pos,
280 fmt_table, "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
283 max_cck->sent_cts_cnt);
284 pos += scnprintf(buf + pos, bufsz - pos,
285 fmt_table, "sent_ba_rsp_cnt:",
286 le32_to_cpu(cck->sent_ba_rsp_cnt),
287 accum_cck->sent_ba_rsp_cnt,
288 delta_cck->sent_ba_rsp_cnt,
289 max_cck->sent_ba_rsp_cnt);
290 pos += scnprintf(buf + pos, bufsz - pos,
291 fmt_table, "dsp_self_kill:",
292 le32_to_cpu(cck->dsp_self_kill),
293 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
294 max_cck->dsp_self_kill);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 fmt_table, "mh_format_err:",
297 le32_to_cpu(cck->mh_format_err),
298 accum_cck->mh_format_err, delta_cck->mh_format_err,
299 max_cck->mh_format_err);
300 pos += scnprintf(buf + pos, bufsz - pos,
301 fmt_table, "re_acq_main_rssi_sum:",
302 le32_to_cpu(cck->re_acq_main_rssi_sum),
303 accum_cck->re_acq_main_rssi_sum,
304 delta_cck->re_acq_main_rssi_sum,
305 max_cck->re_acq_main_rssi_sum);
306
307 pos += scnprintf(buf + pos, bufsz - pos,
308 fmt_header, "Statistics_Rx - GENERAL:");
309 pos += scnprintf(buf + pos, bufsz - pos,
310 fmt_table, "bogus_cts:",
311 le32_to_cpu(general->bogus_cts),
312 accum_general->bogus_cts, delta_general->bogus_cts,
313 max_general->bogus_cts);
314 pos += scnprintf(buf + pos, bufsz - pos,
315 fmt_table, "bogus_ack:",
316 le32_to_cpu(general->bogus_ack),
317 accum_general->bogus_ack, delta_general->bogus_ack,
318 max_general->bogus_ack);
319 pos += scnprintf(buf + pos, bufsz - pos,
320 fmt_table, "non_bssid_frames:",
321 le32_to_cpu(general->non_bssid_frames),
322 accum_general->non_bssid_frames,
323 delta_general->non_bssid_frames,
324 max_general->non_bssid_frames);
325 pos += scnprintf(buf + pos, bufsz - pos,
326 fmt_table, "filtered_frames:",
327 le32_to_cpu(general->filtered_frames),
328 accum_general->filtered_frames,
329 delta_general->filtered_frames,
330 max_general->filtered_frames);
331 pos += scnprintf(buf + pos, bufsz - pos,
332 fmt_table, "non_channel_beacons:",
333 le32_to_cpu(general->non_channel_beacons),
334 accum_general->non_channel_beacons,
335 delta_general->non_channel_beacons,
336 max_general->non_channel_beacons);
337 pos += scnprintf(buf + pos, bufsz - pos,
338 fmt_table, "channel_beacons:",
339 le32_to_cpu(general->channel_beacons),
340 accum_general->channel_beacons,
341 delta_general->channel_beacons,
342 max_general->channel_beacons);
343 pos += scnprintf(buf + pos, bufsz - pos,
344 fmt_table, "num_missed_bcon:",
345 le32_to_cpu(general->num_missed_bcon),
346 accum_general->num_missed_bcon,
347 delta_general->num_missed_bcon,
348 max_general->num_missed_bcon);
349 pos += scnprintf(buf + pos, bufsz - pos,
350 fmt_table, "adc_rx_saturation_time:",
351 le32_to_cpu(general->adc_rx_saturation_time),
352 accum_general->adc_rx_saturation_time,
353 delta_general->adc_rx_saturation_time,
354 max_general->adc_rx_saturation_time);
355 pos += scnprintf(buf + pos, bufsz - pos,
356 fmt_table, "ina_detect_search_tm:",
357 le32_to_cpu(general->ina_detection_search_time),
358 accum_general->ina_detection_search_time,
359 delta_general->ina_detection_search_time,
360 max_general->ina_detection_search_time);
361 pos += scnprintf(buf + pos, bufsz - pos,
362 fmt_table, "beacon_silence_rssi_a:",
363 le32_to_cpu(general->beacon_silence_rssi_a),
364 accum_general->beacon_silence_rssi_a,
365 delta_general->beacon_silence_rssi_a,
366 max_general->beacon_silence_rssi_a);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 fmt_table, "beacon_silence_rssi_b:",
369 le32_to_cpu(general->beacon_silence_rssi_b),
370 accum_general->beacon_silence_rssi_b,
371 delta_general->beacon_silence_rssi_b,
372 max_general->beacon_silence_rssi_b);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 fmt_table, "beacon_silence_rssi_c:",
375 le32_to_cpu(general->beacon_silence_rssi_c),
376 accum_general->beacon_silence_rssi_c,
377 delta_general->beacon_silence_rssi_c,
378 max_general->beacon_silence_rssi_c);
379 pos += scnprintf(buf + pos, bufsz - pos,
380 fmt_table, "interference_data_flag:",
381 le32_to_cpu(general->interference_data_flag),
382 accum_general->interference_data_flag,
383 delta_general->interference_data_flag,
384 max_general->interference_data_flag);
385 pos += scnprintf(buf + pos, bufsz - pos,
386 fmt_table, "channel_load:",
387 le32_to_cpu(general->channel_load),
388 accum_general->channel_load,
389 delta_general->channel_load,
390 max_general->channel_load);
391 pos += scnprintf(buf + pos, bufsz - pos,
392 fmt_table, "dsp_false_alarms:",
393 le32_to_cpu(general->dsp_false_alarms),
394 accum_general->dsp_false_alarms,
395 delta_general->dsp_false_alarms,
396 max_general->dsp_false_alarms);
397 pos += scnprintf(buf + pos, bufsz - pos,
398 fmt_table, "beacon_rssi_a:",
399 le32_to_cpu(general->beacon_rssi_a),
400 accum_general->beacon_rssi_a,
401 delta_general->beacon_rssi_a,
402 max_general->beacon_rssi_a);
403 pos += scnprintf(buf + pos, bufsz - pos,
404 fmt_table, "beacon_rssi_b:",
405 le32_to_cpu(general->beacon_rssi_b),
406 accum_general->beacon_rssi_b,
407 delta_general->beacon_rssi_b,
408 max_general->beacon_rssi_b);
409 pos += scnprintf(buf + pos, bufsz - pos,
410 fmt_table, "beacon_rssi_c:",
411 le32_to_cpu(general->beacon_rssi_c),
412 accum_general->beacon_rssi_c,
413 delta_general->beacon_rssi_c,
414 max_general->beacon_rssi_c);
415 pos += scnprintf(buf + pos, bufsz - pos,
416 fmt_table, "beacon_energy_a:",
417 le32_to_cpu(general->beacon_energy_a),
418 accum_general->beacon_energy_a,
419 delta_general->beacon_energy_a,
420 max_general->beacon_energy_a);
421 pos += scnprintf(buf + pos, bufsz - pos,
422 fmt_table, "beacon_energy_b:",
423 le32_to_cpu(general->beacon_energy_b),
424 accum_general->beacon_energy_b,
425 delta_general->beacon_energy_b,
426 max_general->beacon_energy_b);
427 pos += scnprintf(buf + pos, bufsz - pos,
428 fmt_table, "beacon_energy_c:",
429 le32_to_cpu(general->beacon_energy_c),
430 accum_general->beacon_energy_c,
431 delta_general->beacon_energy_c,
432 max_general->beacon_energy_c);
433
434 pos += scnprintf(buf + pos, bufsz - pos,
435 fmt_header, "Statistics_Rx - OFDM_HT:");
436 pos += scnprintf(buf + pos, bufsz - pos,
437 fmt_table, "plcp_err:",
438 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
439 delta_ht->plcp_err, max_ht->plcp_err);
440 pos += scnprintf(buf + pos, bufsz - pos,
441 fmt_table, "overrun_err:",
442 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
443 delta_ht->overrun_err, max_ht->overrun_err);
444 pos += scnprintf(buf + pos, bufsz - pos,
445 fmt_table, "early_overrun_err:",
446 le32_to_cpu(ht->early_overrun_err),
447 accum_ht->early_overrun_err,
448 delta_ht->early_overrun_err,
449 max_ht->early_overrun_err);
450 pos += scnprintf(buf + pos, bufsz - pos,
451 fmt_table, "crc32_good:",
452 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
453 delta_ht->crc32_good, max_ht->crc32_good);
454 pos += scnprintf(buf + pos, bufsz - pos,
455 fmt_table, "crc32_err:",
456 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
457 delta_ht->crc32_err, max_ht->crc32_err);
458 pos += scnprintf(buf + pos, bufsz - pos,
459 fmt_table, "mh_format_err:",
460 le32_to_cpu(ht->mh_format_err),
461 accum_ht->mh_format_err,
462 delta_ht->mh_format_err, max_ht->mh_format_err);
463 pos += scnprintf(buf + pos, bufsz - pos,
464 fmt_table, "agg_crc32_good:",
465 le32_to_cpu(ht->agg_crc32_good),
466 accum_ht->agg_crc32_good,
467 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
468 pos += scnprintf(buf + pos, bufsz - pos,
469 fmt_table, "agg_mpdu_cnt:",
470 le32_to_cpu(ht->agg_mpdu_cnt),
471 accum_ht->agg_mpdu_cnt,
472 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
473 pos += scnprintf(buf + pos, bufsz - pos,
474 fmt_table, "agg_cnt:",
475 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
476 delta_ht->agg_cnt, max_ht->agg_cnt);
477 pos += scnprintf(buf + pos, bufsz - pos,
478 fmt_table, "unsupport_mcs:",
479 le32_to_cpu(ht->unsupport_mcs),
480 accum_ht->unsupport_mcs,
481 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
482
483 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
484 kfree(buf);
485 return ret;
486}
487
488ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
489 char __user *user_buf,
490 size_t count, loff_t *ppos)
491{
492 struct iwl_priv *priv = file->private_data;
493 int pos = 0;
494 char *buf;
495 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
496 ssize_t ret;
497 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
498
499 if (!iwl_legacy_is_alive(priv))
500 return -EAGAIN;
501
502 buf = kzalloc(bufsz, GFP_KERNEL);
503 if (!buf) {
504 IWL_ERR(priv, "Can not allocate Buffer\n");
505 return -ENOMEM;
506 }
507
508 /* the statistic information display here is based on
509 * the last statistics notification from uCode
510 * might not reflect the current uCode activity
511 */
512 tx = &priv->_4965.statistics.tx;
513 accum_tx = &priv->_4965.accum_statistics.tx;
514 delta_tx = &priv->_4965.delta_statistics.tx;
515 max_tx = &priv->_4965.max_delta.tx;
516
517 pos += iwl4965_statistics_flag(priv, buf, bufsz);
518 pos += scnprintf(buf + pos, bufsz - pos,
519 fmt_header, "Statistics_Tx:");
520 pos += scnprintf(buf + pos, bufsz - pos,
521 fmt_table, "preamble:",
522 le32_to_cpu(tx->preamble_cnt),
523 accum_tx->preamble_cnt,
524 delta_tx->preamble_cnt, max_tx->preamble_cnt);
525 pos += scnprintf(buf + pos, bufsz - pos,
526 fmt_table, "rx_detected_cnt:",
527 le32_to_cpu(tx->rx_detected_cnt),
528 accum_tx->rx_detected_cnt,
529 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
530 pos += scnprintf(buf + pos, bufsz - pos,
531 fmt_table, "bt_prio_defer_cnt:",
532 le32_to_cpu(tx->bt_prio_defer_cnt),
533 accum_tx->bt_prio_defer_cnt,
534 delta_tx->bt_prio_defer_cnt,
535 max_tx->bt_prio_defer_cnt);
536 pos += scnprintf(buf + pos, bufsz - pos,
537 fmt_table, "bt_prio_kill_cnt:",
538 le32_to_cpu(tx->bt_prio_kill_cnt),
539 accum_tx->bt_prio_kill_cnt,
540 delta_tx->bt_prio_kill_cnt,
541 max_tx->bt_prio_kill_cnt);
542 pos += scnprintf(buf + pos, bufsz - pos,
543 fmt_table, "few_bytes_cnt:",
544 le32_to_cpu(tx->few_bytes_cnt),
545 accum_tx->few_bytes_cnt,
546 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
547 pos += scnprintf(buf + pos, bufsz - pos,
548 fmt_table, "cts_timeout:",
549 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
550 delta_tx->cts_timeout, max_tx->cts_timeout);
551 pos += scnprintf(buf + pos, bufsz - pos,
552 fmt_table, "ack_timeout:",
553 le32_to_cpu(tx->ack_timeout),
554 accum_tx->ack_timeout,
555 delta_tx->ack_timeout, max_tx->ack_timeout);
556 pos += scnprintf(buf + pos, bufsz - pos,
557 fmt_table, "expected_ack_cnt:",
558 le32_to_cpu(tx->expected_ack_cnt),
559 accum_tx->expected_ack_cnt,
560 delta_tx->expected_ack_cnt,
561 max_tx->expected_ack_cnt);
562 pos += scnprintf(buf + pos, bufsz - pos,
563 fmt_table, "actual_ack_cnt:",
564 le32_to_cpu(tx->actual_ack_cnt),
565 accum_tx->actual_ack_cnt,
566 delta_tx->actual_ack_cnt,
567 max_tx->actual_ack_cnt);
568 pos += scnprintf(buf + pos, bufsz - pos,
569 fmt_table, "dump_msdu_cnt:",
570 le32_to_cpu(tx->dump_msdu_cnt),
571 accum_tx->dump_msdu_cnt,
572 delta_tx->dump_msdu_cnt,
573 max_tx->dump_msdu_cnt);
574 pos += scnprintf(buf + pos, bufsz - pos,
575 fmt_table, "abort_nxt_frame_mismatch:",
576 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
577 accum_tx->burst_abort_next_frame_mismatch_cnt,
578 delta_tx->burst_abort_next_frame_mismatch_cnt,
579 max_tx->burst_abort_next_frame_mismatch_cnt);
580 pos += scnprintf(buf + pos, bufsz - pos,
581 fmt_table, "abort_missing_nxt_frame:",
582 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
583 accum_tx->burst_abort_missing_next_frame_cnt,
584 delta_tx->burst_abort_missing_next_frame_cnt,
585 max_tx->burst_abort_missing_next_frame_cnt);
586 pos += scnprintf(buf + pos, bufsz - pos,
587 fmt_table, "cts_timeout_collision:",
588 le32_to_cpu(tx->cts_timeout_collision),
589 accum_tx->cts_timeout_collision,
590 delta_tx->cts_timeout_collision,
591 max_tx->cts_timeout_collision);
592 pos += scnprintf(buf + pos, bufsz - pos,
593 fmt_table, "ack_ba_timeout_collision:",
594 le32_to_cpu(tx->ack_or_ba_timeout_collision),
595 accum_tx->ack_or_ba_timeout_collision,
596 delta_tx->ack_or_ba_timeout_collision,
597 max_tx->ack_or_ba_timeout_collision);
598 pos += scnprintf(buf + pos, bufsz - pos,
599 fmt_table, "agg ba_timeout:",
600 le32_to_cpu(tx->agg.ba_timeout),
601 accum_tx->agg.ba_timeout,
602 delta_tx->agg.ba_timeout,
603 max_tx->agg.ba_timeout);
604 pos += scnprintf(buf + pos, bufsz - pos,
605 fmt_table, "agg ba_resched_frames:",
606 le32_to_cpu(tx->agg.ba_reschedule_frames),
607 accum_tx->agg.ba_reschedule_frames,
608 delta_tx->agg.ba_reschedule_frames,
609 max_tx->agg.ba_reschedule_frames);
610 pos += scnprintf(buf + pos, bufsz - pos,
611 fmt_table, "agg scd_query_agg_frame:",
612 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
613 accum_tx->agg.scd_query_agg_frame_cnt,
614 delta_tx->agg.scd_query_agg_frame_cnt,
615 max_tx->agg.scd_query_agg_frame_cnt);
616 pos += scnprintf(buf + pos, bufsz - pos,
617 fmt_table, "agg scd_query_no_agg:",
618 le32_to_cpu(tx->agg.scd_query_no_agg),
619 accum_tx->agg.scd_query_no_agg,
620 delta_tx->agg.scd_query_no_agg,
621 max_tx->agg.scd_query_no_agg);
622 pos += scnprintf(buf + pos, bufsz - pos,
623 fmt_table, "agg scd_query_agg:",
624 le32_to_cpu(tx->agg.scd_query_agg),
625 accum_tx->agg.scd_query_agg,
626 delta_tx->agg.scd_query_agg,
627 max_tx->agg.scd_query_agg);
628 pos += scnprintf(buf + pos, bufsz - pos,
629 fmt_table, "agg scd_query_mismatch:",
630 le32_to_cpu(tx->agg.scd_query_mismatch),
631 accum_tx->agg.scd_query_mismatch,
632 delta_tx->agg.scd_query_mismatch,
633 max_tx->agg.scd_query_mismatch);
634 pos += scnprintf(buf + pos, bufsz - pos,
635 fmt_table, "agg frame_not_ready:",
636 le32_to_cpu(tx->agg.frame_not_ready),
637 accum_tx->agg.frame_not_ready,
638 delta_tx->agg.frame_not_ready,
639 max_tx->agg.frame_not_ready);
640 pos += scnprintf(buf + pos, bufsz - pos,
641 fmt_table, "agg underrun:",
642 le32_to_cpu(tx->agg.underrun),
643 accum_tx->agg.underrun,
644 delta_tx->agg.underrun, max_tx->agg.underrun);
645 pos += scnprintf(buf + pos, bufsz - pos,
646 fmt_table, "agg bt_prio_kill:",
647 le32_to_cpu(tx->agg.bt_prio_kill),
648 accum_tx->agg.bt_prio_kill,
649 delta_tx->agg.bt_prio_kill,
650 max_tx->agg.bt_prio_kill);
651 pos += scnprintf(buf + pos, bufsz - pos,
652 fmt_table, "agg rx_ba_rsp_cnt:",
653 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
654 accum_tx->agg.rx_ba_rsp_cnt,
655 delta_tx->agg.rx_ba_rsp_cnt,
656 max_tx->agg.rx_ba_rsp_cnt);
657
658 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
659 kfree(buf);
660 return ret;
661}
662
663ssize_t
664iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
665 size_t count, loff_t *ppos)
666{
667 struct iwl_priv *priv = file->private_data;
668 int pos = 0;
669 char *buf;
670 int bufsz = sizeof(struct statistics_general) * 10 + 300;
671 ssize_t ret;
672 struct statistics_general_common *general, *accum_general;
673 struct statistics_general_common *delta_general, *max_general;
674 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
675 struct statistics_div *div, *accum_div, *delta_div, *max_div;
676
677 if (!iwl_legacy_is_alive(priv))
678 return -EAGAIN;
679
680 buf = kzalloc(bufsz, GFP_KERNEL);
681 if (!buf) {
682 IWL_ERR(priv, "Can not allocate Buffer\n");
683 return -ENOMEM;
684 }
685
686 /* the statistic information display here is based on
687 * the last statistics notification from uCode
688 * might not reflect the current uCode activity
689 */
690 general = &priv->_4965.statistics.general.common;
691 dbg = &priv->_4965.statistics.general.common.dbg;
692 div = &priv->_4965.statistics.general.common.div;
693 accum_general = &priv->_4965.accum_statistics.general.common;
694 accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
695 accum_div = &priv->_4965.accum_statistics.general.common.div;
696 delta_general = &priv->_4965.delta_statistics.general.common;
697 max_general = &priv->_4965.max_delta.general.common;
698 delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
699 max_dbg = &priv->_4965.max_delta.general.common.dbg;
700 delta_div = &priv->_4965.delta_statistics.general.common.div;
701 max_div = &priv->_4965.max_delta.general.common.div;
702
703 pos += iwl4965_statistics_flag(priv, buf, bufsz);
704 pos += scnprintf(buf + pos, bufsz - pos,
705 fmt_header, "Statistics_General:");
706 pos += scnprintf(buf + pos, bufsz - pos,
707 fmt_value, "temperature:",
708 le32_to_cpu(general->temperature));
709 pos += scnprintf(buf + pos, bufsz - pos,
710 fmt_value, "ttl_timestamp:",
711 le32_to_cpu(general->ttl_timestamp));
712 pos += scnprintf(buf + pos, bufsz - pos,
713 fmt_table, "burst_check:",
714 le32_to_cpu(dbg->burst_check),
715 accum_dbg->burst_check,
716 delta_dbg->burst_check, max_dbg->burst_check);
717 pos += scnprintf(buf + pos, bufsz - pos,
718 fmt_table, "burst_count:",
719 le32_to_cpu(dbg->burst_count),
720 accum_dbg->burst_count,
721 delta_dbg->burst_count, max_dbg->burst_count);
722 pos += scnprintf(buf + pos, bufsz - pos,
723 fmt_table, "wait_for_silence_timeout_count:",
724 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
725 accum_dbg->wait_for_silence_timeout_cnt,
726 delta_dbg->wait_for_silence_timeout_cnt,
727 max_dbg->wait_for_silence_timeout_cnt);
728 pos += scnprintf(buf + pos, bufsz - pos,
729 fmt_table, "sleep_time:",
730 le32_to_cpu(general->sleep_time),
731 accum_general->sleep_time,
732 delta_general->sleep_time, max_general->sleep_time);
733 pos += scnprintf(buf + pos, bufsz - pos,
734 fmt_table, "slots_out:",
735 le32_to_cpu(general->slots_out),
736 accum_general->slots_out,
737 delta_general->slots_out, max_general->slots_out);
738 pos += scnprintf(buf + pos, bufsz - pos,
739 fmt_table, "slots_idle:",
740 le32_to_cpu(general->slots_idle),
741 accum_general->slots_idle,
742 delta_general->slots_idle, max_general->slots_idle);
743 pos += scnprintf(buf + pos, bufsz - pos,
744 fmt_table, "tx_on_a:",
745 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
746 delta_div->tx_on_a, max_div->tx_on_a);
747 pos += scnprintf(buf + pos, bufsz - pos,
748 fmt_table, "tx_on_b:",
749 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
750 delta_div->tx_on_b, max_div->tx_on_b);
751 pos += scnprintf(buf + pos, bufsz - pos,
752 fmt_table, "exec_time:",
753 le32_to_cpu(div->exec_time), accum_div->exec_time,
754 delta_div->exec_time, max_div->exec_time);
755 pos += scnprintf(buf + pos, bufsz - pos,
756 fmt_table, "probe_time:",
757 le32_to_cpu(div->probe_time), accum_div->probe_time,
758 delta_div->probe_time, max_div->probe_time);
759 pos += scnprintf(buf + pos, bufsz - pos,
760 fmt_table, "rx_enable_counter:",
761 le32_to_cpu(general->rx_enable_counter),
762 accum_general->rx_enable_counter,
763 delta_general->rx_enable_counter,
764 max_general->rx_enable_counter);
765 pos += scnprintf(buf + pos, bufsz - pos,
766 fmt_table, "num_of_sos_states:",
767 le32_to_cpu(general->num_of_sos_states),
768 accum_general->num_of_sos_states,
769 delta_general->num_of_sos_states,
770 max_general->num_of_sos_states);
771 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
772 kfree(buf);
773 return ret;
774}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
deleted file mode 100644
index 6c8e35361a9e..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl4965_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count, loff_t *ppos);
40#else
41static ssize_t
42iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
43 size_t count, loff_t *ppos)
44{
45 return 0;
46}
47static ssize_t
48iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
49 size_t count, loff_t *ppos)
50{
51 return 0;
52}
53static ssize_t
54iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 return 0;
58}
59#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
deleted file mode 100644
index cb9baab1ff7d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
+++ /dev/null
@@ -1,154 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-4965.h"
76#include "iwl-io.h"
77
78/******************************************************************************
79 *
80 * EEPROM related functions
81 *
82******************************************************************************/
83
84/*
85 * The device's EEPROM semaphore prevents conflicts between driver and uCode
86 * when accessing the EEPROM; each access is a series of pulses to/from the
87 * EEPROM chip, not a single event, so even reads could conflict if they
88 * weren't arbitrated by the semaphore.
89 */
/*
 * Acquire the device's EEPROM hardware semaphore (see comment block
 * above: each EEPROM access is a series of pulses, so driver and uCode
 * accesses must be arbitrated even for reads).
 *
 * Retries up to EEPROM_SEM_RETRY_LIMIT times: each attempt sets the
 * OWN_SEM bit in CSR_HW_IF_CONFIG_REG, then polls (iwl_poll_bit, with
 * EEPROM_SEM_TIMEOUT) to see whether the hardware granted ownership.
 *
 * Returns the (>= 0) iwl_poll_bit result on success, or the last
 * negative poll result after all retries fail.
 * NOTE(review): if EEPROM_SEM_RETRY_LIMIT were 0, `ret` would be
 * returned uninitialized — presumably the constant is > 0; confirm.
 */
90int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
91{
92	u16 count;
93	int ret;
94
95	for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
96		/* Request semaphore */
97		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
98			CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
99
100		/* See if we got it */
101		ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
102				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
103				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
104				EEPROM_SEM_TIMEOUT);
105		if (ret >= 0) {
106			IWL_DEBUG_IO(priv,
107				"Acquired semaphore after %d tries.\n",
108				count+1);
109			return ret;
110		}
111	}
112
113	return ret;
114}
115
/*
 * Release the EEPROM hardware semaphore taken by
 * iwl4965_eeprom_acquire_semaphore(): clears the OWN_SEM bit in
 * CSR_HW_IF_CONFIG_REG so uCode may access the EEPROM again.
 */
116void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
117{
118	iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
119		CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
120
121}
122
/*
 * Validate that the EEPROM image and its calibration data are new
 * enough for this driver: both the EEPROM version and the calibration
 * version (read via iwl_legacy_eeprom_query16) must be >= the minimums
 * in priv->cfg.
 *
 * Returns 0 and logs the versions on success; logs an error and
 * returns -EINVAL when either version is too old.
 */
123int iwl4965_eeprom_check_version(struct iwl_priv *priv)
124{
125	u16 eeprom_ver;
126	u16 calib_ver;
127
128	eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
129	calib_ver = iwl_legacy_eeprom_query16(priv,
130			EEPROM_4965_CALIB_VERSION_OFFSET);
131
132	if (eeprom_ver < priv->cfg->eeprom_ver ||
133	    calib_ver < priv->cfg->eeprom_calib_ver)
134		goto err;
135
136	IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
137		 eeprom_ver, calib_ver);
138
139	return 0;
140err:
	/* The error message prints both actual-vs-required pairs even
	 * though only one of the two checks may have failed. */
141	IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
142		"CALIB=0x%x < 0x%x\n",
143		eeprom_ver, priv->cfg->eeprom_ver,
144		calib_ver, priv->cfg->eeprom_calib_ver);
145	return -EINVAL;
146
147}
148
/*
 * Copy the device MAC address (ETH_ALEN bytes at EEPROM_MAC_ADDRESS)
 * out of the cached EEPROM image into @mac.
 * NOTE(review): the iwl_legacy_eeprom_query_addr() result is used
 * unchecked — presumably it cannot fail for this fixed offset; confirm.
 */
149void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
150{
151	const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
152					EEPROM_MAC_ADDRESS);
153	memcpy(mac, addr, ETH_ALEN);
154}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
deleted file mode 100644
index fc6fa2886d9c..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ /dev/null
@@ -1,811 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
65 * Use iwl-commands.h for uCode API definitions.
66 * Use iwl-dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_4965_hw_h__
70#define __iwl_4965_hw_h__
71
72#include "iwl-fh.h"
73
74/* EEPROM */
75#define IWL4965_EEPROM_IMG_SIZE 1024
76
77/*
78 * uCode queue management definitions ...
79 * The first queue used for block-ack aggregation is #7 (4965 only).
80 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
81 */
82#define IWL49_FIRST_AMPDU_QUEUE 7
83
84/* Sizes and addresses for instruction and data memory (SRAM) in
85 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
86#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
87#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
88
89#define IWL49_RTC_DATA_LOWER_BOUND (0x800000)
90#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
91
92#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \
93 IWL49_RTC_INST_LOWER_BOUND)
94#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \
95 IWL49_RTC_DATA_LOWER_BOUND)
96
97#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
98#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
99
100/* Size of uCode instruction memory in bootstrap state machine */
101#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
102
/*
 * Return nonzero when @addr lies inside the 4965 embedded processor's
 * data SRAM window [IWL49_RTC_DATA_LOWER_BOUND, IWL49_RTC_DATA_UPPER_BOUND)
 * — lower bound inclusive, upper bound exclusive.
 */
103static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
104{
105	return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
106	       (addr < IWL49_RTC_DATA_UPPER_BOUND);
107}
108
109/********************* START TEMPERATURE *************************************/
110
111/**
112 * 4965 temperature calculation.
113 *
114 * The driver must calculate the device temperature before calculating
115 * a txpower setting (amplifier gain is temperature dependent). The
116 * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
117 * values used for the life of the driver, and one of which (R4) is the
118 * real-time temperature indicator.
119 *
120 * uCode provides all 4 values to the driver via the "initialize alive"
121 * notification (see struct iwl4965_init_alive_resp). After the runtime uCode
122 * image loads, uCode updates the R4 value via statistics notifications
123 * (see STATISTICS_NOTIFICATION), which occur after each received beacon
124 * when associated, or can be requested via REPLY_STATISTICS_CMD.
125 *
126 * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
127 * must sign-extend to 32 bits before applying formula below.
128 *
129 * Formula:
130 *
131 * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
132 *
133 * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
134 * an additional correction, which should be centered around 0 degrees
135 * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
136 * centering the 97/100 correction around 0 degrees K.
137 *
138 * Add 273 to Kelvin value to find degrees Celsius, for comparing current
139 * temperature with factory-measured temperatures when calculating txpower
140 * settings.
141 */
142#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
143#define TEMPERATURE_CALIB_A_VAL 259
144
145/* Limit range of calculated temperature to be between these Kelvin values */
146#define IWL_TX_POWER_TEMPERATURE_MIN (263)
147#define IWL_TX_POWER_TEMPERATURE_MAX (410)
148
149#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
150 (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
151 ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
152
153/********************* END TEMPERATURE ***************************************/
154
155/********************* START TXPOWER *****************************************/
156
157/**
158 * 4965 txpower calculations rely on information from three sources:
159 *
160 * 1) EEPROM
161 * 2) "initialize" alive notification
162 * 3) statistics notifications
163 *
164 * EEPROM data consists of:
165 *
166 * 1) Regulatory information (max txpower and channel usage flags) is provided
167 * separately for each channel that can possibly supported by 4965.
168 * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
169 * (legacy) channels.
170 *
171 * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
172 * for locations in EEPROM.
173 *
174 * 2) Factory txpower calibration information is provided separately for
175 * sub-bands of contiguous channels. 2.4GHz has just one sub-band,
176 * but 5 GHz has several sub-bands.
177 *
178 * In addition, per-band (2.4 and 5 Ghz) saturation txpowers are provided.
179 *
180 * See struct iwl4965_eeprom_calib_info (and the tree of structures
181 * contained within it) for format, and struct iwl4965_eeprom for
182 * locations in EEPROM.
183 *
184 * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
185 * consists of:
186 *
187 * 1) Temperature calculation parameters.
188 *
189 * 2) Power supply voltage measurement.
190 *
191 * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
192 *
193 * Statistics notifications deliver:
194 *
195 * 1) Current values for temperature param R4.
196 */
197
198/**
199 * To calculate a txpower setting for a given desired target txpower, channel,
200 * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
201 * support MIMO and transmit diversity), driver must do the following:
202 *
203 * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
204 * Do not exceed regulatory limit; reduce target txpower if necessary.
205 *
206 * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
207 * 2 transmitters will be used simultaneously; driver must reduce the
208 * regulatory limit by 3 dB (half-power) for each transmitter, so the
209 * combined total output of the 2 transmitters is within regulatory limits.
210 *
211 *
212 * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
213 * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
214 * reduce target txpower if necessary.
215 *
216 * Backoff values below are in 1/2 dB units (equivalent to steps in
217 * txpower gain tables):
218 *
219 * OFDM 6 - 36 MBit: 10 steps (5 dB)
220 * OFDM 48 MBit: 15 steps (7.5 dB)
221 * OFDM 54 MBit: 17 steps (8.5 dB)
222 * OFDM 60 MBit: 20 steps (10 dB)
223 * CCK all rates: 10 steps (5 dB)
224 *
225 * Backoff values apply to saturation txpower on a per-transmitter basis;
226 * when using MIMO (2 transmitters), each transmitter uses the same
227 * saturation level provided in EEPROM, and the same backoff values;
228 * no reduction (such as with regulatory txpower limits) is required.
229 *
230 * Saturation and Backoff values apply equally to 20 Mhz (legacy) channel
231 * widths and 40 Mhz (.11n HT40) channel widths; there is no separate
232 * factory measurement for ht40 channels.
233 *
234 * The result of this step is the final target txpower. The rest of
235 * the steps figure out the proper settings for the device to achieve
236 * that target txpower.
237 *
238 *
239 * 3) Determine (EEPROM) calibration sub band for the target channel, by
240 * comparing against first and last channels in each sub band
241 * (see struct iwl4965_eeprom_calib_subband_info).
242 *
243 *
244 * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
245 * referencing the 2 factory-measured (sample) channels within the sub band.
246 *
247 * Interpolation is based on difference between target channel's frequency
248 * and the sample channels' frequencies. Since channel numbers are based
249 * on frequency (5 MHz between each channel number), this is equivalent
250 * to interpolating based on channel number differences.
251 *
252 * Note that the sample channels may or may not be the channels at the
253 * edges of the sub band. The target channel may be "outside" of the
254 * span of the sampled channels.
255 *
256 * Driver may choose the pair (for 2 Tx chains) of measurements (see
257 * struct iwl4965_eeprom_calib_ch_info) for which the actual measured
258 * txpower comes closest to the desired txpower. Usually, though,
259 * the middle set of measurements is closest to the regulatory limits,
260 * and is therefore a good choice for all txpower calculations (this
261 * assumes that high accuracy is needed for maximizing legal txpower,
262 * while lower txpower configurations do not need as much accuracy).
263 *
264 * Driver should interpolate both members of the chosen measurement pair,
265 * i.e. for both Tx chains (radio transmitters), unless the driver knows
266 * that only one of the chains will be used (e.g. only one tx antenna
267 * connected, but this should be unusual). The rate scaling algorithm
268 * switches antennas to find best performance, so both Tx chains will
269 * be used (although only one at a time) even for non-MIMO transmissions.
270 *
271 * Driver should interpolate factory values for temperature, gain table
272 * index, and actual power. The power amplifier detector values are
273 * not used by the driver.
274 *
275 * Sanity check: If the target channel happens to be one of the sample
276 * channels, the results should agree with the sample channel's
277 * measurements!
278 *
279 *
280 * 5) Find difference between desired txpower and (interpolated)
281 * factory-measured txpower. Using (interpolated) factory gain table index
282 * (shown elsewhere) as a starting point, adjust this index lower to
283 * increase txpower, or higher to decrease txpower, until the target
284 * txpower is reached. Each step in the gain table is 1/2 dB.
285 *
286 * For example, if factory measured txpower is 16 dBm, and target txpower
287 * is 13 dBm, add 6 steps to the factory gain index to reduce txpower
288 * by 3 dB.
289 *
290 *
291 * 6) Find difference between current device temperature and (interpolated)
292 * factory-measured temperature for sub-band. Factory values are in
293 * degrees Celsius. To calculate current temperature, see comments for
294 * "4965 temperature calculation".
295 *
296 * If current temperature is higher than factory temperature, driver must
297 * increase gain (lower gain table index), and vice verse.
298 *
299 * Temperature affects gain differently for different channels:
300 *
301 * 2.4 GHz all channels: 3.5 degrees per half-dB step
302 * 5 GHz channels 34-43: 4.5 degrees per half-dB step
303 * 5 GHz channels >= 44: 4.0 degrees per half-dB step
304 *
305 * NOTE: Temperature can increase rapidly when transmitting, especially
306 * with heavy traffic at high txpowers. Driver should update
307 * temperature calculations often under these conditions to
308 * maintain strong txpower in the face of rising temperature.
309 *
310 *
311 * 7) Find difference between current power supply voltage indicator
312 * (from "initialize alive") and factory-measured power supply voltage
313 * indicator (EEPROM).
314 *
315 * If the current voltage is higher (indicator is lower) than factory
316 * voltage, gain should be reduced (gain table index increased) by:
317 *
318 * (eeprom - current) / 7
319 *
320 * If the current voltage is lower (indicator is higher) than factory
321 * voltage, gain should be increased (gain table index decreased) by:
322 *
323 * 2 * (current - eeprom) / 7
324 *
325 * If number of index steps in either direction turns out to be > 2,
326 * something is wrong ... just use 0.
327 *
328 * NOTE: Voltage compensation is independent of band/channel.
329 *
330 * NOTE: "Initialize" uCode measures current voltage, which is assumed
331 * to be constant after this initial measurement. Voltage
332 * compensation for txpower (number of steps in gain table)
333 * may be calculated once and used until the next uCode bootload.
334 *
335 *
336 * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
337 * adjust txpower for each transmitter chain, so txpower is balanced
338 * between the two chains. There are 5 pairs of tx_atten[group][chain]
339 * values in "initialize alive", one pair for each of 5 channel ranges:
340 *
341 * Group 0: 5 GHz channel 34-43
342 * Group 1: 5 GHz channel 44-70
343 * Group 2: 5 GHz channel 71-124
344 * Group 3: 5 GHz channel 125-200
345 * Group 4: 2.4 GHz all channels
346 *
347 * Add the tx_atten[group][chain] value to the index for the target chain.
348 * The values are signed, but are in pairs of 0 and a non-negative number,
349 * so as to reduce gain (if necessary) of the "hotter" channel. This
350 * avoids any need to double-check for regulatory compliance after
351 * this step.
352 *
353 *
354 * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
355 * value to the index:
356 *
357 * Hardware rev B: 9 steps (4.5 dB)
358 * Hardware rev C: 5 steps (2.5 dB)
359 *
360 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
361 * bits [3:2], 1 = B, 2 = C.
362 *
363 * NOTE: This compensation is in addition to any saturation backoff that
364 * might have been applied in an earlier step.
365 *
366 *
367 * 10) Select the gain table, based on band (2.4 vs 5 GHz).
368 *
369 * Limit the adjusted index to stay within the table!
370 *
371 *
372 * 11) Read gain table entries for DSP and radio gain, place into appropriate
373 * location(s) in command (struct iwl4965_txpowertable_cmd).
374 */
375
376/**
377 * When MIMO is used (2 transmitters operating simultaneously), driver should
378 * limit each transmitter to deliver a max of 3 dB below the regulatory limit
379 * for the device. That is, use half power for each transmitter, so total
380 * txpower is within regulatory limits.
381 *
382 * The value "6" represents number of steps in gain table to reduce power 3 dB.
383 * Each step is 1/2 dB.
384 */
385#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
386
387/**
388 * CCK gain compensation.
389 *
390 * When calculating txpowers for CCK, after making sure that the target power
391 * is within regulatory and saturation limits, driver must additionally
392 * back off gain by adding these values to the gain table index.
393 *
394 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
395 * bits [3:2], 1 = B, 2 = C.
396 */
397#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
398#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
399
400/*
401 * 4965 power supply voltage compensation for txpower
402 */
403#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7)
404
405/**
406 * Gain tables.
407 *
408 * The following tables contain pair of values for setting txpower, i.e.
409 * gain settings for the output of the device's digital signal processor (DSP),
410 * and for the analog gain structure of the transmitter.
411 *
412 * Each entry in the gain tables represents a step of 1/2 dB. Note that these
413 * are *relative* steps, not indications of absolute output power. Output
414 * power varies with temperature, voltage, and channel frequency, and also
415 * requires consideration of average power (to satisfy regulatory constraints),
416 * and peak power (to avoid distortion of the output signal).
417 *
418 * Each entry contains two values:
419 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
420 * linear value that multiplies the output of the digital signal processor,
421 * before being sent to the analog radio.
422 * 2) Radio gain. This sets the analog gain of the radio Tx path.
423 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
424 *
425 * EEPROM contains factory calibration data for txpower. This maps actual
426 * measured txpower levels to gain settings in the "well known" tables
427 * below ("well-known" means here that both factory calibration *and* the
428 * driver work with the same table).
429 *
430 * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
431 * has an extension (into negative indexes), in case the driver needs to
432 * boost power setting for high device temperatures (higher than would be
433 * present during factory calibration). A 5 Ghz EEPROM index of "40"
434 * corresponds to the 49th entry in the table used by the driver.
435 */
436#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */
437#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
438
439/**
440 * 2.4 GHz gain table
441 *
442 * Index Dsp gain Radio gain
443 * 0 110 0x3f (highest gain)
444 * 1 104 0x3f
445 * 2 98 0x3f
446 * 3 110 0x3e
447 * 4 104 0x3e
448 * 5 98 0x3e
449 * 6 110 0x3d
450 * 7 104 0x3d
451 * 8 98 0x3d
452 * 9 110 0x3c
453 * 10 104 0x3c
454 * 11 98 0x3c
455 * 12 110 0x3b
456 * 13 104 0x3b
457 * 14 98 0x3b
458 * 15 110 0x3a
459 * 16 104 0x3a
460 * 17 98 0x3a
461 * 18 110 0x39
462 * 19 104 0x39
463 * 20 98 0x39
464 * 21 110 0x38
465 * 22 104 0x38
466 * 23 98 0x38
467 * 24 110 0x37
468 * 25 104 0x37
469 * 26 98 0x37
470 * 27 110 0x36
471 * 28 104 0x36
472 * 29 98 0x36
473 * 30 110 0x35
474 * 31 104 0x35
475 * 32 98 0x35
476 * 33 110 0x34
477 * 34 104 0x34
478 * 35 98 0x34
479 * 36 110 0x33
480 * 37 104 0x33
481 * 38 98 0x33
482 * 39 110 0x32
483 * 40 104 0x32
484 * 41 98 0x32
485 * 42 110 0x31
486 * 43 104 0x31
487 * 44 98 0x31
488 * 45 110 0x30
489 * 46 104 0x30
490 * 47 98 0x30
491 * 48 110 0x6
492 * 49 104 0x6
493 * 50 98 0x6
494 * 51 110 0x5
495 * 52 104 0x5
496 * 53 98 0x5
497 * 54 110 0x4
498 * 55 104 0x4
499 * 56 98 0x4
500 * 57 110 0x3
501 * 58 104 0x3
502 * 59 98 0x3
503 * 60 110 0x2
504 * 61 104 0x2
505 * 62 98 0x2
506 * 63 110 0x1
507 * 64 104 0x1
508 * 65 98 0x1
509 * 66 110 0x0
510 * 67 104 0x0
511 * 68 98 0x0
512 * 69 97 0
513 * 70 96 0
514 * 71 95 0
515 * 72 94 0
516 * 73 93 0
517 * 74 92 0
518 * 75 91 0
519 * 76 90 0
520 * 77 89 0
521 * 78 88 0
522 * 79 87 0
523 * 80 86 0
524 * 81 85 0
525 * 82 84 0
526 * 83 83 0
527 * 84 82 0
528 * 85 81 0
529 * 86 80 0
530 * 87 79 0
531 * 88 78 0
532 * 89 77 0
533 * 90 76 0
534 * 91 75 0
535 * 92 74 0
536 * 93 73 0
537 * 94 72 0
538 * 95 71 0
539 * 96 70 0
540 * 97 69 0
541 * 98 68 0
542 */
543
544/**
545 * 5 GHz gain table
546 *
547 * Index Dsp gain Radio gain
548 * -9 123 0x3F (highest gain)
549 * -8 117 0x3F
550 * -7 110 0x3F
551 * -6 104 0x3F
552 * -5 98 0x3F
553 * -4 110 0x3E
554 * -3 104 0x3E
555 * -2 98 0x3E
556 * -1 110 0x3D
557 * 0 104 0x3D
558 * 1 98 0x3D
559 * 2 110 0x3C
560 * 3 104 0x3C
561 * 4 98 0x3C
562 * 5 110 0x3B
563 * 6 104 0x3B
564 * 7 98 0x3B
565 * 8 110 0x3A
566 * 9 104 0x3A
567 * 10 98 0x3A
568 * 11 110 0x39
569 * 12 104 0x39
570 * 13 98 0x39
571 * 14 110 0x38
572 * 15 104 0x38
573 * 16 98 0x38
574 * 17 110 0x37
575 * 18 104 0x37
576 * 19 98 0x37
577 * 20 110 0x36
578 * 21 104 0x36
579 * 22 98 0x36
580 * 23 110 0x35
581 * 24 104 0x35
582 * 25 98 0x35
583 * 26 110 0x34
584 * 27 104 0x34
585 * 28 98 0x34
586 * 29 110 0x33
587 * 30 104 0x33
588 * 31 98 0x33
589 * 32 110 0x32
590 * 33 104 0x32
591 * 34 98 0x32
592 * 35 110 0x31
593 * 36 104 0x31
594 * 37 98 0x31
595 * 38 110 0x30
596 * 39 104 0x30
597 * 40 98 0x30
598 * 41 110 0x25
599 * 42 104 0x25
600 * 43 98 0x25
601 * 44 110 0x24
602 * 45 104 0x24
603 * 46 98 0x24
604 * 47 110 0x23
605 * 48 104 0x23
606 * 49 98 0x23
607 * 50 110 0x22
608 * 51 104 0x18
609 * 52 98 0x18
610 * 53 110 0x17
611 * 54 104 0x17
612 * 55 98 0x17
613 * 56 110 0x16
614 * 57 104 0x16
615 * 58 98 0x16
616 * 59 110 0x15
617 * 60 104 0x15
618 * 61 98 0x15
619 * 62 110 0x14
620 * 63 104 0x14
621 * 64 98 0x14
622 * 65 110 0x13
623 * 66 104 0x13
624 * 67 98 0x13
625 * 68 110 0x12
626 * 69 104 0x08
627 * 70 98 0x08
628 * 71 110 0x07
629 * 72 104 0x07
630 * 73 98 0x07
631 * 74 110 0x06
632 * 75 104 0x06
633 * 76 98 0x06
634 * 77 110 0x05
635 * 78 104 0x05
636 * 79 98 0x05
637 * 80 110 0x04
638 * 81 104 0x04
639 * 82 98 0x04
640 * 83 110 0x03
641 * 84 104 0x03
642 * 85 98 0x03
643 * 86 110 0x02
644 * 87 104 0x02
645 * 88 98 0x02
646 * 89 110 0x01
647 * 90 104 0x01
648 * 91 98 0x01
649 * 92 110 0x00
650 * 93 104 0x00
651 * 94 98 0x00
652 * 95 93 0x00
653 * 96 88 0x00
654 * 97 83 0x00
655 * 98 78 0x00
656 */
657
658
659/**
660 * Sanity checks and default values for EEPROM regulatory levels.
661 * If EEPROM values fall outside MIN/MAX range, use default values.
662 *
663 * Regulatory limits refer to the maximum average txpower allowed by
664 * regulatory agencies in the geographies in which the device is meant
665 * to be operated. These limits are SKU-specific (i.e. geography-specific),
666 * and channel-specific; each channel has an individual regulatory limit
667 * listed in the EEPROM.
668 *
669 * Units are in half-dBm (i.e. "34" means 17 dBm).
670 */
671#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34)
672#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34)
673#define IWL_TX_POWER_REGULATORY_MIN (0)
674#define IWL_TX_POWER_REGULATORY_MAX (34)
675
676/**
677 * Sanity checks and default values for EEPROM saturation levels.
678 * If EEPROM values fall outside MIN/MAX range, use default values.
679 *
680 * Saturation is the highest level that the output power amplifier can produce
681 * without significant clipping distortion. This is a "peak" power level.
682 * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
683 * require differing amounts of backoff, relative to their average power output,
684 * in order to avoid clipping distortion.
685 *
686 * Driver must make sure that it is violating neither the saturation limit,
687 * nor the regulatory limit, when calculating Tx power settings for various
688 * rates.
689 *
690 * Units are in half-dBm (i.e. "38" means 19 dBm).
691 */
692#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38)
693#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38)
694#define IWL_TX_POWER_SATURATION_MIN (20)
695#define IWL_TX_POWER_SATURATION_MAX (50)
696
697/**
698 * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
699 * and thermal Txpower calibration.
700 *
701 * When calculating txpower, driver must compensate for current device
702 * temperature; higher temperature requires higher gain. Driver must calculate
703 * current temperature (see "4965 temperature calculation"), then compare vs.
704 * factory calibration temperature in EEPROM; if current temperature is higher
705 * than factory temperature, driver must *increase* gain by proportions shown
706 * in table below. If current temperature is lower than factory, driver must
707 * *decrease* gain.
708 *
709 * Different frequency ranges require different compensation, as shown below.
710 */
711/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
712#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
713#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
714
715/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
716#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
717#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
718
719/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
720#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
721#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
722
723/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
724#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
725#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
726
727/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
728#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
729#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
730
/*
 * Indices for the five Tx-attenuation/thermal calibration channel
 * groups whose channel ranges are given by the CALIB_IWL_TX_ATTEN_GR*
 * defines above (four 5 GHz sub-bands plus one 2.4 GHz group).
 * CALIB_CH_GROUP_MAX is the group count, for sizing arrays.
 */
731enum {
732	CALIB_CH_GROUP_1 = 0,
733	CALIB_CH_GROUP_2 = 1,
734	CALIB_CH_GROUP_3 = 2,
735	CALIB_CH_GROUP_4 = 3,
736	CALIB_CH_GROUP_5 = 4,
737	CALIB_CH_GROUP_MAX
738};
739
740/********************* END TXPOWER *****************************************/
741
742
743/**
744 * Tx/Rx Queues
745 *
746 * Most communication between driver and 4965 is via queues of data buffers.
747 * For example, all commands that the driver issues to device's embedded
748 * controller (uCode) are via the command queue (one of the Tx queues). All
749 * uCode command responses/replies/notifications, including Rx frames, are
750 * conveyed from uCode to driver via the Rx queue.
751 *
752 * Most support for these queues, including handshake support, resides in
753 * structures in host DRAM, shared between the driver and the device. When
754 * allocating this memory, the driver must make sure that data written by
755 * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
756 * cache memory), so DRAM and cache are consistent, and the device can
757 * immediately see changes made by the driver.
758 *
759 * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
760 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
761 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
762 */
763#define IWL49_NUM_FIFOS 7
764#define IWL49_CMD_FIFO_NUM 4
765#define IWL49_NUM_QUEUES 16
766#define IWL49_NUM_AMPDU_QUEUES 8
767
768
769/**
770 * struct iwl4965_schedq_bc_tbl
771 *
772 * Byte Count table
773 *
774 * Each Tx queue uses a byte-count table containing 320 entries:
775 * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
776 * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
777 * max Tx window is 64 TFDs).
778 *
779 * When driver sets up a new TFD, it must also enter the total byte count
780 * of the frame to be transmitted into the corresponding entry in the byte
781 * count table for the chosen Tx queue. If the TFD index is 0-63, the driver
782 * must duplicate the byte count entry in corresponding index 256-319.
783 *
784 * padding puts each byte count table on a 1024-byte boundary;
785 * 4965 assumes tables are separated by 1024 bytes.
786 */
/*
 * Per-queue byte-count table shared with the scheduler (layout
 * described in the comment block above): one __le16 per TFD slot,
 * padded so each table occupies exactly 1024 bytes, since the 4965
 * assumes consecutive tables are 1024 bytes apart.  __packed forbids
 * compiler-inserted padding that would break the DMA layout.
 */
787struct iwl4965_scd_bc_tbl {
788	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
789	u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
790} __packed;
791
792
793#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
794
795/* RSSI to dBm */
796#define IWL4965_RSSI_OFFSET 44
797
798/* PCI registers */
799#define PCI_CFG_RETRY_TIMEOUT 0x041
800
801/* PCI register values */
802#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
803#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
804
805#define IWL4965_DEFAULT_TX_RETRY 15
806
807/* EEPROM */
808#define IWL4965_FIRST_AMPDU_QUEUE 10
809
810
811#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
deleted file mode 100644
index 6862fdcaee62..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.c
+++ /dev/null
@@ -1,73 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <net/mac80211.h>
36#include <linux/etherdevice.h>
37#include <asm/unaligned.h>
38
39#include "iwl-commands.h"
40#include "iwl-dev.h"
41#include "iwl-core.h"
42#include "iwl-io.h"
43#include "iwl-4965-led.h"
44
45/* Send led command */
46static int
47iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
48{
49 struct iwl_host_cmd cmd = {
50 .id = REPLY_LEDS_CMD,
51 .len = sizeof(struct iwl_led_cmd),
52 .data = led_cmd,
53 .flags = CMD_ASYNC,
54 .callback = NULL,
55 };
56 u32 reg;
57
58 reg = iwl_read32(priv, CSR_LED_REG);
59 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
60 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
61
62 return iwl_legacy_send_cmd(priv, &cmd);
63}
64
65/* Set led register off */
66void iwl4965_led_enable(struct iwl_priv *priv)
67{
68 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
69}
70
71const struct iwl_led_ops iwl4965_led_ops = {
72 .cmd = iwl4965_send_led_cmd,
73};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
deleted file mode 100644
index 5ed3615fc338..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_4965_led_h__
28#define __iwl_4965_led_h__
29
/* 4965 LED ops table wired into the shared iwlegacy LED framework. */
30extern const struct iwl_led_ops iwl4965_led_ops;
/* Turn the LED on (writes CSR_LED_REG_TRUN_ON to the CSR LED register). */
31void iwl4965_led_enable(struct iwl_priv *priv);
32
33#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
deleted file mode 100644
index 2be6d9e3b019..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ /dev/null
@@ -1,1194 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-sta.h"
42
43void iwl4965_check_abort_status(struct iwl_priv *priv,
44 u8 frame_count, u32 status)
45{
46 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
47 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
48 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
49 queue_work(priv->workqueue, &priv->tx_flush);
50 }
51}
52
53/*
54 * EEPROM
55 */
56struct iwl_mod_params iwl4965_mod_params = {
57 .amsdu_size_8K = 1,
58 .restart_fw = 1,
59 /* the rest are 0 by default */
60};
61
62void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
63{
64 unsigned long flags;
65 int i;
66 spin_lock_irqsave(&rxq->lock, flags);
67 INIT_LIST_HEAD(&rxq->rx_free);
68 INIT_LIST_HEAD(&rxq->rx_used);
69 /* Fill the rx_used queue with _all_ of the Rx buffers */
70 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
71 /* In the reset function, these buffers may have been allocated
72 * to an SKB, so we need to unmap and free potential storage */
73 if (rxq->pool[i].page != NULL) {
74 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
75 PAGE_SIZE << priv->hw_params.rx_page_order,
76 PCI_DMA_FROMDEVICE);
77 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
78 rxq->pool[i].page = NULL;
79 }
80 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
81 }
82
83 for (i = 0; i < RX_QUEUE_SIZE; i++)
84 rxq->queue[i] = NULL;
85
86 /* Set us so that we have processed and used all buffers, but have
87 * not restocked the Rx queue with fresh buffers */
88 rxq->read = rxq->write = 0;
89 rxq->write_actual = 0;
90 rxq->free_count = 0;
91 spin_unlock_irqrestore(&rxq->lock, flags);
92}
93
94int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
95{
96 u32 rb_size;
97 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
98 u32 rb_timeout = 0;
99
100 if (priv->cfg->mod_params->amsdu_size_8K)
101 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
102 else
103 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
104
105 /* Stop Rx DMA */
106 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
107
108 /* Reset driver's Rx queue write index */
109 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
110
111 /* Tell device where to find RBD circular buffer in DRAM */
112 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
113 (u32)(rxq->bd_dma >> 8));
114
115 /* Tell device where in DRAM to update its Rx status */
116 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
117 rxq->rb_stts_dma >> 4);
118
119 /* Enable Rx DMA
120 * Direct rx interrupts to hosts
121 * Rx buffer size 4 or 8k
122 * RB timeout 0x10
123 * 256 RBDs
124 */
125 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
126 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
127 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
128 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
129 rb_size|
130 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
131 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
132
133 /* Set interrupt coalescing timer to default (2048 usecs) */
134 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
135
136 return 0;
137}
138
139static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
140{
141/*
142 * (for documentation purposes)
143 * to set power to V_AUX, do:
144
145 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
146 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
147 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
148 ~APMG_PS_CTRL_MSK_PWR_SRC);
149 */
150
151 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
152 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
153 ~APMG_PS_CTRL_MSK_PWR_SRC);
154}
155
156int iwl4965_hw_nic_init(struct iwl_priv *priv)
157{
158 unsigned long flags;
159 struct iwl_rx_queue *rxq = &priv->rxq;
160 int ret;
161
162 /* nic_init */
163 spin_lock_irqsave(&priv->lock, flags);
164 priv->cfg->ops->lib->apm_ops.init(priv);
165
166 /* Set interrupt coalescing calibration timer to default (512 usecs) */
167 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
168
169 spin_unlock_irqrestore(&priv->lock, flags);
170
171 iwl4965_set_pwr_vmain(priv);
172
173 priv->cfg->ops->lib->apm_ops.config(priv);
174
175 /* Allocate the RX queue, or reset if it is already allocated */
176 if (!rxq->bd) {
177 ret = iwl_legacy_rx_queue_alloc(priv);
178 if (ret) {
179 IWL_ERR(priv, "Unable to initialize Rx queue\n");
180 return -ENOMEM;
181 }
182 } else
183 iwl4965_rx_queue_reset(priv, rxq);
184
185 iwl4965_rx_replenish(priv);
186
187 iwl4965_rx_init(priv, rxq);
188
189 spin_lock_irqsave(&priv->lock, flags);
190
191 rxq->need_update = 1;
192 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
193
194 spin_unlock_irqrestore(&priv->lock, flags);
195
196 /* Allocate or reset and init all Tx and Command queues */
197 if (!priv->txq) {
198 ret = iwl4965_txq_ctx_alloc(priv);
199 if (ret)
200 return ret;
201 } else
202 iwl4965_txq_ctx_reset(priv);
203
204 set_bit(STATUS_INIT, &priv->status);
205
206 return 0;
207}
208
209/**
210 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
211 */
212static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
213 dma_addr_t dma_addr)
214{
215 return cpu_to_le32((u32)(dma_addr >> 8));
216}
217
218/**
219 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
220 *
221 * If there are slots in the RX queue that need to be restocked,
222 * and we have free pre-allocated buffers, fill the ranks as much
223 * as we can, pulling from rx_free.
224 *
225 * This moves the 'write' index forward to catch up with 'processed', and
226 * also updates the memory address in the firmware to reference the new
227 * target buffer.
228 */
229void iwl4965_rx_queue_restock(struct iwl_priv *priv)
230{
231 struct iwl_rx_queue *rxq = &priv->rxq;
232 struct list_head *element;
233 struct iwl_rx_mem_buffer *rxb;
234 unsigned long flags;
235
236 spin_lock_irqsave(&rxq->lock, flags);
237 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
238 /* The overwritten rxb must be a used one */
239 rxb = rxq->queue[rxq->write];
240 BUG_ON(rxb && rxb->page);
241
242 /* Get next free Rx buffer, remove from free list */
243 element = rxq->rx_free.next;
244 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
245 list_del(element);
246
247 /* Point to Rx buffer via next RBD in circular buffer */
248 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
249 rxb->page_dma);
250 rxq->queue[rxq->write] = rxb;
251 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
252 rxq->free_count--;
253 }
254 spin_unlock_irqrestore(&rxq->lock, flags);
255 /* If the pre-allocated buffer pool is dropping low, schedule to
256 * refill it */
257 if (rxq->free_count <= RX_LOW_WATERMARK)
258 queue_work(priv->workqueue, &priv->rx_replenish);
259
260
261 /* If we've added more space for the firmware to place data, tell it.
262 * Increment device's write pointer in multiples of 8. */
263 if (rxq->write_actual != (rxq->write & ~0x7)) {
264 spin_lock_irqsave(&rxq->lock, flags);
265 rxq->need_update = 1;
266 spin_unlock_irqrestore(&rxq->lock, flags);
267 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
268 }
269}
270
271/**
272 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
273 *
274 * When moving to rx_free an SKB is allocated for the slot.
275 *
276 * Also restock the Rx queue via iwl_rx_queue_restock.
277 * This is called as a scheduled work item (except for during initialization)
278 */
279static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
280{
281 struct iwl_rx_queue *rxq = &priv->rxq;
282 struct list_head *element;
283 struct iwl_rx_mem_buffer *rxb;
284 struct page *page;
285 unsigned long flags;
286 gfp_t gfp_mask = priority;
287
288 while (1) {
289 spin_lock_irqsave(&rxq->lock, flags);
290 if (list_empty(&rxq->rx_used)) {
291 spin_unlock_irqrestore(&rxq->lock, flags);
292 return;
293 }
294 spin_unlock_irqrestore(&rxq->lock, flags);
295
296 if (rxq->free_count > RX_LOW_WATERMARK)
297 gfp_mask |= __GFP_NOWARN;
298
299 if (priv->hw_params.rx_page_order > 0)
300 gfp_mask |= __GFP_COMP;
301
302 /* Alloc a new receive buffer */
303 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
304 if (!page) {
305 if (net_ratelimit())
306 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
307 "order: %d\n",
308 priv->hw_params.rx_page_order);
309
310 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
311 net_ratelimit())
312 IWL_CRIT(priv,
313 "Failed to alloc_pages with %s. "
314 "Only %u free buffers remaining.\n",
315 priority == GFP_ATOMIC ?
316 "GFP_ATOMIC" : "GFP_KERNEL",
317 rxq->free_count);
318 /* We don't reschedule replenish work here -- we will
319 * call the restock method and if it still needs
320 * more buffers it will schedule replenish */
321 return;
322 }
323
324 spin_lock_irqsave(&rxq->lock, flags);
325
326 if (list_empty(&rxq->rx_used)) {
327 spin_unlock_irqrestore(&rxq->lock, flags);
328 __free_pages(page, priv->hw_params.rx_page_order);
329 return;
330 }
331 element = rxq->rx_used.next;
332 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
333 list_del(element);
334
335 spin_unlock_irqrestore(&rxq->lock, flags);
336
337 BUG_ON(rxb->page);
338 rxb->page = page;
339 /* Get physical address of the RB */
340 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
341 PAGE_SIZE << priv->hw_params.rx_page_order,
342 PCI_DMA_FROMDEVICE);
343 /* dma address must be no more than 36 bits */
344 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
345 /* and also 256 byte aligned! */
346 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
347
348 spin_lock_irqsave(&rxq->lock, flags);
349
350 list_add_tail(&rxb->list, &rxq->rx_free);
351 rxq->free_count++;
352 priv->alloc_rxb_page++;
353
354 spin_unlock_irqrestore(&rxq->lock, flags);
355 }
356}
357
358void iwl4965_rx_replenish(struct iwl_priv *priv)
359{
360 unsigned long flags;
361
362 iwl4965_rx_allocate(priv, GFP_KERNEL);
363
364 spin_lock_irqsave(&priv->lock, flags);
365 iwl4965_rx_queue_restock(priv);
366 spin_unlock_irqrestore(&priv->lock, flags);
367}
368
369void iwl4965_rx_replenish_now(struct iwl_priv *priv)
370{
371 iwl4965_rx_allocate(priv, GFP_ATOMIC);
372
373 iwl4965_rx_queue_restock(priv);
374}
375
376/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
377 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
378 * This free routine walks the list of POOL entries and if SKB is set to
379 * non NULL it is unmapped and freed
380 */
381void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
382{
383 int i;
384 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
385 if (rxq->pool[i].page != NULL) {
386 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
387 PAGE_SIZE << priv->hw_params.rx_page_order,
388 PCI_DMA_FROMDEVICE);
389 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
390 rxq->pool[i].page = NULL;
391 }
392 }
393
394 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
395 rxq->bd_dma);
396 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
397 rxq->rb_stts, rxq->rb_stts_dma);
398 rxq->bd = NULL;
399 rxq->rb_stts = NULL;
400}
401
402int iwl4965_rxq_stop(struct iwl_priv *priv)
403{
404
405 /* stop Rx DMA */
406 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
407 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
408 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
409
410 return 0;
411}
412
413int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
414{
415 int idx = 0;
416 int band_offset = 0;
417
418 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
419 if (rate_n_flags & RATE_MCS_HT_MSK) {
420 idx = (rate_n_flags & 0xff);
421 return idx;
422 /* Legacy rate format, search for match in table */
423 } else {
424 if (band == IEEE80211_BAND_5GHZ)
425 band_offset = IWL_FIRST_OFDM_RATE;
426 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
427 if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
428 return idx - band_offset;
429 }
430
431 return -1;
432}
433
434static int iwl4965_calc_rssi(struct iwl_priv *priv,
435 struct iwl_rx_phy_res *rx_resp)
436{
437 /* data from PHY/DSP regarding signal strength, etc.,
438 * contents are always there, not configurable by host. */
439 struct iwl4965_rx_non_cfg_phy *ncphy =
440 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
441 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
442 >> IWL49_AGC_DB_POS;
443
444 u32 valid_antennae =
445 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
446 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
447 u8 max_rssi = 0;
448 u32 i;
449
450 /* Find max rssi among 3 possible receivers.
451 * These values are measured by the digital signal processor (DSP).
452 * They should stay fairly constant even as the signal strength varies,
453 * if the radio's automatic gain control (AGC) is working right.
454 * AGC value (see below) will provide the "interesting" info. */
455 for (i = 0; i < 3; i++)
456 if (valid_antennae & (1 << i))
457 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
458
459 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
460 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
461 max_rssi, agc);
462
463 /* dBm = max_rssi dB - agc dB - constant.
464 * Higher AGC (higher radio gain) means lower signal. */
465 return max_rssi - agc - IWL4965_RSSI_OFFSET;
466}
467
468
469static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
470{
471 u32 decrypt_out = 0;
472
473 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
474 RX_RES_STATUS_STATION_FOUND)
475 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
476 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
477
478 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
479
480 /* packet was not encrypted */
481 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
482 RX_RES_STATUS_SEC_TYPE_NONE)
483 return decrypt_out;
484
485 /* packet was encrypted with unknown alg */
486 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
487 RX_RES_STATUS_SEC_TYPE_ERR)
488 return decrypt_out;
489
490 /* decryption was not done in HW */
491 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
492 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
493 return decrypt_out;
494
495 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
496
497 case RX_RES_STATUS_SEC_TYPE_CCMP:
498 /* alg is CCM: check MIC only */
499 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
500 /* Bad MIC */
501 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
502 else
503 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
504
505 break;
506
507 case RX_RES_STATUS_SEC_TYPE_TKIP:
508 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
509 /* Bad TTAK */
510 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
511 break;
512 }
513 /* fall through if TTAK OK */
514 default:
515 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
516 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
517 else
518 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
519 break;
520 }
521
522 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
523 decrypt_in, decrypt_out);
524
525 return decrypt_out;
526}
527
528static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
529 struct ieee80211_hdr *hdr,
530 u16 len,
531 u32 ampdu_status,
532 struct iwl_rx_mem_buffer *rxb,
533 struct ieee80211_rx_status *stats)
534{
535 struct sk_buff *skb;
536 __le16 fc = hdr->frame_control;
537
538 /* We only process data packets if the interface is open */
539 if (unlikely(!priv->is_open)) {
540 IWL_DEBUG_DROP_LIMIT(priv,
541 "Dropping packet while interface is not open.\n");
542 return;
543 }
544
545 /* In case of HW accelerated crypto and bad decryption, drop */
546 if (!priv->cfg->mod_params->sw_crypto &&
547 iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
548 return;
549
550 skb = dev_alloc_skb(128);
551 if (!skb) {
552 IWL_ERR(priv, "dev_alloc_skb failed\n");
553 return;
554 }
555
556 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
557
558 iwl_legacy_update_stats(priv, false, fc, len);
559 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
560
561 ieee80211_rx(priv->hw, skb);
562 priv->alloc_rxb_page--;
563 rxb->page = NULL;
564}
565
566/* Called for REPLY_RX (legacy ABG frames), or
567 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
568void iwl4965_rx_reply_rx(struct iwl_priv *priv,
569 struct iwl_rx_mem_buffer *rxb)
570{
571 struct ieee80211_hdr *header;
572 struct ieee80211_rx_status rx_status;
573 struct iwl_rx_packet *pkt = rxb_addr(rxb);
574 struct iwl_rx_phy_res *phy_res;
575 __le32 rx_pkt_status;
576 struct iwl_rx_mpdu_res_start *amsdu;
577 u32 len;
578 u32 ampdu_status;
579 u32 rate_n_flags;
580
581 /**
582 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
583 * REPLY_RX: physical layer info is in this buffer
584 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
585 * command and cached in priv->last_phy_res
586 *
587 * Here we set up local variables depending on which command is
588 * received.
589 */
590 if (pkt->hdr.cmd == REPLY_RX) {
591 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
592 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
593 + phy_res->cfg_phy_cnt);
594
595 len = le16_to_cpu(phy_res->byte_count);
596 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
597 phy_res->cfg_phy_cnt + len);
598 ampdu_status = le32_to_cpu(rx_pkt_status);
599 } else {
600 if (!priv->_4965.last_phy_res_valid) {
601 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
602 return;
603 }
604 phy_res = &priv->_4965.last_phy_res;
605 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
606 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
607 len = le16_to_cpu(amsdu->byte_count);
608 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
609 ampdu_status = iwl4965_translate_rx_status(priv,
610 le32_to_cpu(rx_pkt_status));
611 }
612
613 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
614 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
615 phy_res->cfg_phy_cnt);
616 return;
617 }
618
619 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
620 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
621 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
622 le32_to_cpu(rx_pkt_status));
623 return;
624 }
625
626 /* This will be used in several places later */
627 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
628
629 /* rx_status carries information about the packet to mac80211 */
630 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
631 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
632 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
633 rx_status.freq =
634 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
635 rx_status.band);
636 rx_status.rate_idx =
637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
638 rx_status.flag = 0;
639
640 /* TSF isn't reliable. In order to allow smooth user experience,
641 * this W/A doesn't propagate it to the mac80211 */
642 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
643
644 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
645
646 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
647 rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
648
649 iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
650 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
651 rx_status.signal, (unsigned long long)rx_status.mactime);
652
653 /*
654 * "antenna number"
655 *
656 * It seems that the antenna field in the phy flags value
657 * is actually a bit field. This is undefined by radiotap,
658 * it wants an actual antenna number but I always get "7"
659 * for most legacy frames I receive indicating that the
660 * same frame was received on all three RX chains.
661 *
662 * I think this field should be removed in favor of a
663 * new 802.11n radiotap field "RX chains" that is defined
664 * as a bitmask.
665 */
666 rx_status.antenna =
667 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
668 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
669
670 /* set the preamble flag if appropriate */
671 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
672 rx_status.flag |= RX_FLAG_SHORTPRE;
673
674 /* Set up the HT phy flags */
675 if (rate_n_flags & RATE_MCS_HT_MSK)
676 rx_status.flag |= RX_FLAG_HT;
677 if (rate_n_flags & RATE_MCS_HT40_MSK)
678 rx_status.flag |= RX_FLAG_40MHZ;
679 if (rate_n_flags & RATE_MCS_SGI_MSK)
680 rx_status.flag |= RX_FLAG_SHORT_GI;
681
682 iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
683 rxb, &rx_status);
684}
685
686/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
687 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
688void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
689 struct iwl_rx_mem_buffer *rxb)
690{
691 struct iwl_rx_packet *pkt = rxb_addr(rxb);
692 priv->_4965.last_phy_res_valid = true;
693 memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
694 sizeof(struct iwl_rx_phy_res));
695}
696
697static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
698 struct ieee80211_vif *vif,
699 enum ieee80211_band band,
700 u8 is_active, u8 n_probes,
701 struct iwl_scan_channel *scan_ch)
702{
703 struct ieee80211_channel *chan;
704 const struct ieee80211_supported_band *sband;
705 const struct iwl_channel_info *ch_info;
706 u16 passive_dwell = 0;
707 u16 active_dwell = 0;
708 int added, i;
709 u16 channel;
710
711 sband = iwl_get_hw_mode(priv, band);
712 if (!sband)
713 return 0;
714
715 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
716 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
717
718 if (passive_dwell <= active_dwell)
719 passive_dwell = active_dwell + 1;
720
721 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
722 chan = priv->scan_request->channels[i];
723
724 if (chan->band != band)
725 continue;
726
727 channel = chan->hw_value;
728 scan_ch->channel = cpu_to_le16(channel);
729
730 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
731 if (!iwl_legacy_is_channel_valid(ch_info)) {
732 IWL_DEBUG_SCAN(priv,
733 "Channel %d is INVALID for this band.\n",
734 channel);
735 continue;
736 }
737
738 if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
739 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
740 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
741 else
742 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
743
744 if (n_probes)
745 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
746
747 scan_ch->active_dwell = cpu_to_le16(active_dwell);
748 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
749
750 /* Set txpower levels to defaults */
751 scan_ch->dsp_atten = 110;
752
753 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
754 * power level:
755 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
756 */
757 if (band == IEEE80211_BAND_5GHZ)
758 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
759 else
760 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
761
762 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
763 channel, le32_to_cpu(scan_ch->type),
764 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
765 "ACTIVE" : "PASSIVE",
766 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
767 active_dwell : passive_dwell);
768
769 scan_ch++;
770 added++;
771 }
772
773 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
774 return added;
775}
776
777int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
778{
779 struct iwl_host_cmd cmd = {
780 .id = REPLY_SCAN_CMD,
781 .len = sizeof(struct iwl_scan_cmd),
782 .flags = CMD_SIZE_HUGE,
783 };
784 struct iwl_scan_cmd *scan;
785 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
786 u32 rate_flags = 0;
787 u16 cmd_len;
788 u16 rx_chain = 0;
789 enum ieee80211_band band;
790 u8 n_probes = 0;
791 u8 rx_ant = priv->hw_params.valid_rx_ant;
792 u8 rate;
793 bool is_active = false;
794 int chan_mod;
795 u8 active_chains;
796 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
797 int ret;
798
799 lockdep_assert_held(&priv->mutex);
800
801 if (vif)
802 ctx = iwl_legacy_rxon_ctx_from_vif(vif);
803
804 if (!priv->scan_cmd) {
805 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
806 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
807 if (!priv->scan_cmd) {
808 IWL_DEBUG_SCAN(priv,
809 "fail to allocate memory for scan\n");
810 return -ENOMEM;
811 }
812 }
813 scan = priv->scan_cmd;
814 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
815
816 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
817 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
818
819 if (iwl_legacy_is_any_associated(priv)) {
820 u16 interval;
821 u32 extra;
822 u32 suspend_time = 100;
823 u32 scan_suspend_time = 100;
824
825 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
826 interval = vif->bss_conf.beacon_int;
827
828 scan->suspend_time = 0;
829 scan->max_out_time = cpu_to_le32(200 * 1024);
830 if (!interval)
831 interval = suspend_time;
832
833 extra = (suspend_time / interval) << 22;
834 scan_suspend_time = (extra |
835 ((suspend_time % interval) * 1024));
836 scan->suspend_time = cpu_to_le32(scan_suspend_time);
837 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
838 scan_suspend_time, interval);
839 }
840
841 if (priv->scan_request->n_ssids) {
842 int i, p = 0;
843 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
844 for (i = 0; i < priv->scan_request->n_ssids; i++) {
845 /* always does wildcard anyway */
846 if (!priv->scan_request->ssids[i].ssid_len)
847 continue;
848 scan->direct_scan[p].id = WLAN_EID_SSID;
849 scan->direct_scan[p].len =
850 priv->scan_request->ssids[i].ssid_len;
851 memcpy(scan->direct_scan[p].ssid,
852 priv->scan_request->ssids[i].ssid,
853 priv->scan_request->ssids[i].ssid_len);
854 n_probes++;
855 p++;
856 }
857 is_active = true;
858 } else
859 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
860
861 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
862 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
863 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
864
865 switch (priv->scan_band) {
866 case IEEE80211_BAND_2GHZ:
867 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
868 chan_mod = le32_to_cpu(
869 priv->contexts[IWL_RXON_CTX_BSS].active.flags &
870 RXON_FLG_CHANNEL_MODE_MSK)
871 >> RXON_FLG_CHANNEL_MODE_POS;
872 if (chan_mod == CHANNEL_MODE_PURE_40) {
873 rate = IWL_RATE_6M_PLCP;
874 } else {
875 rate = IWL_RATE_1M_PLCP;
876 rate_flags = RATE_MCS_CCK_MSK;
877 }
878 break;
879 case IEEE80211_BAND_5GHZ:
880 rate = IWL_RATE_6M_PLCP;
881 break;
882 default:
883 IWL_WARN(priv, "Invalid scan band\n");
884 return -EIO;
885 }
886
887 /*
888 * If active scanning is requested but a certain channel is
889 * marked passive, we can do active scanning if we detect
890 * transmissions.
891 *
892 * There is an issue with some firmware versions that triggers
893 * a sysassert on a "good CRC threshold" of zero (== disabled),
894 * on a radar channel even though this means that we should NOT
895 * send probes.
896 *
897 * The "good CRC threshold" is the number of frames that we
898 * need to receive during our dwell time on a channel before
899 * sending out probes -- setting this to a huge value will
900 * mean we never reach it, but at the same time work around
901 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
902 * here instead of IWL_GOOD_CRC_TH_DISABLED.
903 */
904 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
905 IWL_GOOD_CRC_TH_NEVER;
906
907 band = priv->scan_band;
908
909 if (priv->cfg->scan_rx_antennas[band])
910 rx_ant = priv->cfg->scan_rx_antennas[band];
911
912 priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
913 priv->scan_tx_ant[band],
914 scan_tx_antennas);
915 rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
916 scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
917
918 /* In power save mode use one chain, otherwise use all chains */
919 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
920 /* rx_ant has been set to all valid chains previously */
921 active_chains = rx_ant &
922 ((u8)(priv->chain_noise_data.active_chains));
923 if (!active_chains)
924 active_chains = rx_ant;
925
926 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
927 priv->chain_noise_data.active_chains);
928
929 rx_ant = iwl4965_first_antenna(active_chains);
930 }
931
932 /* MIMO is not used here, but value is required */
933 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
934 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
935 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
936 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
937 scan->rx_chain = cpu_to_le16(rx_chain);
938
939 cmd_len = iwl_legacy_fill_probe_req(priv,
940 (struct ieee80211_mgmt *)scan->data,
941 vif->addr,
942 priv->scan_request->ie,
943 priv->scan_request->ie_len,
944 IWL_MAX_SCAN_SIZE - sizeof(*scan));
945 scan->tx_cmd.len = cpu_to_le16(cmd_len);
946
947 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
948 RXON_FILTER_BCON_AWARE_MSK);
949
950 scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
951 is_active, n_probes,
952 (void *)&scan->data[cmd_len]);
953 if (scan->channel_count == 0) {
954 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
955 return -EIO;
956 }
957
958 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
959 scan->channel_count * sizeof(struct iwl_scan_channel);
960 cmd.data = scan;
961 scan->len = cpu_to_le16(cmd.len);
962
963 set_bit(STATUS_SCAN_HW, &priv->status);
964
965 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
966 if (ret)
967 clear_bit(STATUS_SCAN_HW, &priv->status);
968
969 return ret;
970}
971
/*
 * iwl4965_manage_ibss_station - add or remove the IBSS BSSID station entry
 *
 * @add: true to add a station entry for the current BSSID, false to remove
 *       the one added earlier (its id is tracked in ibss_bssid_sta_id).
 *
 * Returns the result of the underlying add/remove station call.
 */
int iwl4965_manage_ibss_station(struct iwl_priv *priv,
			       struct ieee80211_vif *vif, bool add)
{
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	if (add)
		/* stores the new station id for the later remove call */
		return iwl4965_add_bssid_station(priv, vif_priv->ctx,
						vif->bss_conf.bssid,
						&vif_priv->ibss_bssid_sta_id);
	return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
				  vif->bss_conf.bssid);
}
984
/*
 * iwl4965_free_tfds_in_queue - credit back freed TFDs for a station/TID
 *
 * Decrements the per-station, per-TID count of TFDs believed queued in
 * hardware by @freed.  If more frames are reported freed than we think
 * are queued, log it and clamp the counter to zero instead of
 * underflowing.
 *
 * Caller must hold priv->sta_lock (asserted below).
 */
void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
			    int sta_id, int tid, int freed)
{
	lockdep_assert_held(&priv->sta_lock);

	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
	else {
		IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
			priv->stations[sta_id].tid[tid].tfds_in_queue,
			freed);
		/* clamp rather than wrap the unsigned counter */
		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
	}
}
999
/* 20-bit mask, one bit per Tx queue (bits 0-19) -- presumably matches the
 * hardware Tx queue count; confirm against the queue definitions. */
#define IWL_TX_QUEUE_MSK	0xfffff
1001
/*
 * True when one Rx chain suffices: either static SMPS forces a single
 * chain, or the current HT config flags one chain as sufficient.
 */
static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
{
	return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
	       priv->current_ht_config.single_chain_sufficient;
}
1007
/* Rx chain counts chosen by iwl4965_get_{active,idle}_rx_chain_count() */
#define IWL_NUM_RX_CHAINS_MULTIPLE	3	/* expecting MIMO */
#define IWL_NUM_RX_CHAINS_SINGLE	2	/* single stream expected */
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1	/* idle under SMPS */
1012
1013/*
1014 * Determine how many receiver/antenna chains to use.
1015 *
1016 * More provides better reception via diversity. Fewer saves power
1017 * at the expense of throughput, but only when not in powersave to
1018 * start with.
1019 *
1020 * MIMO (dual stream) requires at least 2, but works better with 3.
1021 * This does not determine *which* chains to use, just how many.
1022 */
1023static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
1024{
1025 /* # of Rx chains to use when expecting MIMO. */
1026 if (iwl4965_is_single_rx_stream(priv))
1027 return IWL_NUM_RX_CHAINS_SINGLE;
1028 else
1029 return IWL_NUM_RX_CHAINS_MULTIPLE;
1030}
1031
1032/*
1033 * When we are in power saving mode, unless device support spatial
1034 * multiplexing power save, use the active count for rx chain count.
1035 */
static int
iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
	/* # Rx chains when idling, depending on SMPS mode:
	 * static/dynamic SMPS idle on one chain, SMPS off keeps the
	 * active count; unknown modes warn and fall back to active. */
	switch (priv->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		return IWL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d",
			priv->current_ht_config.smps);
		return active_cnt;
	}
}
1052
1053/* up to 4 chains */
1054static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
1055{
1056 u8 res;
1057 res = (chain_bitmap & BIT(0)) >> 0;
1058 res += (chain_bitmap & BIT(1)) >> 1;
1059 res += (chain_bitmap & BIT(2)) >> 2;
1060 res += (chain_bitmap & BIT(3)) >> 3;
1061 return res;
1062}
1063
1064/**
1065 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1066 *
1067 * Selects how many and which Rx receivers/antennas/chains to use.
1068 * This should not be used for scan command ... it puts data in wrong place.
1069 */
void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = iwl4965_is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl4965_chain_noise_calibration()
	 *	checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);


	/* correct rx chain count according hw settings
	 * and chain noise calibration: never claim more chains than
	 * calibration found connected
	 */
	valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	/* Force MIMO only when multiple streams are expected, enough
	 * chains are active, and we are not in power-save (CAM). */
	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
1121
1122u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
1123{
1124 int i;
1125 u8 ind = ant;
1126
1127 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1128 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1129 if (valid & BIT(ind))
1130 return ind;
1131 }
1132 return ant;
1133}
1134
/*
 * Map an FH register address to its symbolic name for debug output.
 * IWL_CMD is presumably a case-plus-stringify helper macro -- see its
 * definition in the shared headers.
 */
static const char *iwl4965_get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1151
/*
 * iwl4965_dump_fh - dump the FH (flow handler) register values
 *
 * With CONFIG_IWLWIFI_LEGACY_DEBUG and @display set, the dump is
 * formatted into a freshly kmalloc'ed buffer returned via @buf and the
 * number of bytes written is returned (caller presumably frees *buf --
 * confirm at call sites); -ENOMEM on allocation failure.  Otherwise the
 * values are emitted with IWL_ERR and 0 is returned.
 */
int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	/* registers dumped, in order */
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (display) {
		/* 48 bytes per register line plus header slack */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				"  %34s: 0X%08x\n",
				iwl4965_get_fh_string(fh_tbl[i]),
				iwl_legacy_read_direct32(priv, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(priv, "FH register values:\n");
	for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(priv, "  %34s: 0X%08x\n",
			iwl4965_get_fh_string(fh_tbl[i]),
			iwl_legacy_read_direct32(priv, fh_tbl[i]));
	}
	return 0;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
deleted file mode 100644
index 57ebe214e68c..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ /dev/null
@@ -1,2871 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <net/mac80211.h>
31
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/delay.h>
35
36#include <linux/workqueue.h>
37
38#include "iwl-dev.h"
39#include "iwl-sta.h"
40#include "iwl-core.h"
41#include "iwl-4965.h"
42
#define IWL4965_RS_NAME "iwl-4965-rs"

/* tries at a rate before toggling antenna / giving up on HT */
#define NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_NUMBER_TRY      1
#define IWL_HT_NUMBER_TRY   3

#define IWL_RATE_MAX_WINDOW		62	/* # tx in history window */
#define IWL_RATE_MIN_FAILURE_TH		6	/* min failures to calc tpt */
#define IWL_RATE_MIN_SUCCESS_TH		8	/* min successes to calc tpt */

/* max allowed rate miss before sync LQ cmd */
#define IWL_MISSED_RATE_MAX		15
/* max time to accum history, 3 seconds (3*HZ jiffies) */
#define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
57
/* Legacy (OFDM) rate index to fall back to for each HT rate index when
 * HT must be abandoned; the lowest entries clamp to 6M. */
static u8 rs_ht_to_legacy[] = {
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
	IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
	IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
	IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
};
67
/* Next antenna configuration to try when toggling, indexed by the
 * current ANT_* bitmask (walked by iwl4965_rs_toggle_antenna()). */
static const u8 ant_toggle_lookup[] = {
	/*ANT_NONE -> */ ANT_NONE,
	/*ANT_A    -> */ ANT_B,
	/*ANT_B    -> */ ANT_C,
	/*ANT_AB   -> */ ANT_BC,
	/*ANT_C    -> */ ANT_A,
	/*ANT_AC   -> */ ANT_AB,
	/*ANT_BC   -> */ ANT_AC,
	/*ANT_ABC  -> */ ANT_ABC,
};
78
/*
 * Build one iwl_rate_info entry at its designated index: legacy, SISO
 * and MIMO2 PLCP codes, the IEEE rate value, and prev/next index pairs
 * for the three rate walks (see parameter-order comment below).
 */
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO2_##s##M_PLCP,\
				    IWL_RATE_##r##M_IEEE,      \
				    IWL_RATE_##ip##M_INDEX,    \
				    IWL_RATE_##in##M_INDEX,    \
				    IWL_RATE_##rp##M_INDEX,    \
				    IWL_RATE_##rn##M_INDEX,    \
				    IWL_RATE_##pp##M_INDEX,    \
				    IWL_RATE_##np##M_INDEX }
90
91/*
92 * Parameter order:
93 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
94 *
95 * If there isn't a valid next or previous rate then INV is used which
96 * maps to IWL_RATE_INVALID
97 *
98 */
/* Entries land at their designated slot ([IWL_RATE_xM_INDEX]) via
 * IWL_DECLARE_RATE_INFO, so the table mirrors the rate index enum. */
const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
};
114
/*
 * Convert a unified rate_n_flags word into a rate table index
 * (IWL_RATE_*_INDEX).  Returns -1 when no match is found.
 */
static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* HT rate format: MCS index is carried in the low byte */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);

		/* fold the MIMO2 PLCP range back onto the SISO range */
		if (idx >= IWL_RATE_MIMO2_6M_PLCP)
			idx = idx - IWL_RATE_MIMO2_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
			if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	/* out-of-range HT index or unknown legacy PLCP */
	return -1;
}
142
143static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
144 struct sk_buff *skb,
145 struct ieee80211_sta *sta,
146 struct iwl_lq_sta *lq_sta);
147static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
148 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
149static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
150 bool force_search);
151
152#ifdef CONFIG_MAC80211_DEBUGFS
153static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
154 u32 *rate_n_flags, int index);
155#else
156static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
157 u32 *rate_n_flags, int index)
158{}
159#endif
160
161/**
162 * The following tables contain the expected throughput metrics for all rates
163 *
164 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
165 *
166 * where invalid entries are zeros.
167 *
168 * CCK rates are only valid in legacy table and will only be used in G
169 * (2.4 GHz) band.
170 */
171
/* Legacy (non-HT) rates; the first four entries are the CCK rates. */
static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};

/* For the HT tables, rows select the modifier combination:
 * normal, short GI, aggregation, aggregation + short GI. */
static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 42, 0,  76, 102, 124, 158, 183, 193, 202}, /* Norm */
	{0, 0, 0, 0, 46, 0,  82, 110, 132, 167, 192, 202, 210}, /* SGI */
	{0, 0, 0, 0, 48, 0,  93, 135, 176, 251, 319, 351, 381}, /* AGG */
	{0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
};

static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
	{0, 0, 0, 0,  96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
	{0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
};

static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
	{0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
	{0, 0, 0, 0,  92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
	{0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
};

static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
	{0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
	{0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
};

/* mbps, mcs -- human-readable labels, indexed like the rate table */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
	{  "1", "BPSK DSSS"},
	{  "2", "QPSK DSSS"},
	{"5.5", "BPSK CCK"},
	{ "11", "QPSK CCK"},
	{  "6", "BPSK 1/2"},
	{  "9", "BPSK 1/2"},
	{ "12", "QPSK 1/2"},
	{ "18", "QPSK 3/4"},
	{ "24", "16QAM 1/2"},
	{ "36", "16QAM 3/4"},
	{ "48", "64QAM 2/3"},
	{ "54", "64QAM 3/4"},
	{ "60", "64QAM 5/6"},
};
220
221#define MCS_INDEX_PER_STREAM (8)
222
223static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
224{
225 return (u8)(rate_n_flags & 0xFF);
226}
227
228static void
229iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
230{
231 window->data = 0;
232 window->success_counter = 0;
233 window->success_ratio = IWL_INVALID_VALUE;
234 window->counter = 0;
235 window->average_tpt = IWL_INVALID_VALUE;
236 window->stamp = 0;
237}
238
239static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
240{
241 return (ant_type & valid_antenna) == ant_type;
242}
243
244/*
245 * removes the old data from the statistics. All data that is older than
246 * TID_MAX_TIME_DIFF, will be deleted.
247 */
static void
iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
{
	/* The oldest age we want to keep */
	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;

	/* Pop cells off the head of the circular buffer until the
	 * remaining history is young enough or the queue empties;
	 * each cell spans TID_QUEUE_CELL_SPACING of time. */
	while (tl->queue_count &&
	       (tl->time_stamp < oldest_time)) {
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		tl->time_stamp += TID_QUEUE_CELL_SPACING;
		tl->queue_count--;
		tl->head++;
		if (tl->head >= TID_QUEUE_MAX_SIZE)
			tl->head = 0;
	}
}
265
266/*
267 * increment traffic load value for tid and also remove
268 * any old values if passed the certain time period
269 */
static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
				struct ieee80211_hdr *hdr)
{
	/* curr_time in milliseconds (jiffies_to_msecs below) */
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;
	u8 tid;

	/* non-QoS data carries no TID: nothing to account */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	} else
		return MAX_TID_COUNT;

	if (unlikely(tid >= TID_MAX_LOAD_COUNT))
		return MAX_TID_COUNT;

	tl = &lq_data->load[tid];

	/* quantize the timestamp to the cell granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* Happens only for the first packet. Initialize the data */
	/* NOTE(review): still returns MAX_TID_COUNT here -- presumably so
	 * no load decision is made until history exists; confirm. */
	if (!(tl->queue_count)) {
		tl->total = 1;
		tl->time_stamp = curr_time;
		tl->queue_count = 1;
		tl->head = 0;
		tl->packet_count[0] = 1;
		return MAX_TID_COUNT;
	}

	/* index of the cell this packet falls into, relative to head */
	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		iwl4965_rs_tl_rm_old_stats(tl, curr_time);

	index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
	tl->packet_count[index] = tl->packet_count[index] + 1;
	tl->total = tl->total + 1;

	if ((index + 1) > tl->queue_count)
		tl->queue_count = index + 1;

	return tid;
}
319
320/*
321 get the traffic load value for tid
322*/
static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;

	/* unknown TID: report zero load */
	if (tid >= TID_MAX_LOAD_COUNT)
		return 0;

	tl = &(lq_data->load[tid]);

	/* quantize the timestamp to the cell granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* no history recorded yet */
	if (!(tl->queue_count))
		return 0;

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		iwl4965_rs_tl_rm_old_stats(tl, curr_time);

	/* total packets over the retained window */
	return tl->total;
}
350
static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
			struct iwl_lq_sta *lq_data, u8 tid,
			struct ieee80211_sta *sta)
{
	int ret = -EAGAIN;
	u32 load;

	load = iwl4965_rs_tl_get_load(lq_data, tid);

	/* Only worth starting a BA session once traffic on this TID is
	 * heavy enough to benefit from aggregation. */
	if (load > IWL_AGG_LOAD_THRESHOLD) {
		IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
				sta->addr, tid);
		ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
		if (ret == -EAGAIN) {
			/*
			 * driver and mac80211 is out of sync
			 * this might be cause by reloading firmware
			 * stop the tx ba session here
			 */
			IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
				tid);
			ieee80211_stop_tx_ba_session(sta, tid);
		}
	} else {
		IWL_ERR(priv, "Aggregation not enabled for tid %d "
			"because load = %u\n", tid, load);
	}
	return ret;
}
380
381static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
382 struct iwl_lq_sta *lq_data,
383 struct ieee80211_sta *sta)
384{
385 if (tid < TID_MAX_LOAD_COUNT)
386 iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
387 else
388 IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
389 tid, TID_MAX_LOAD_COUNT);
390}
391
392static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
393{
394 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
395 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
396 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
397}
398
399/*
400 * Static function to get the expected throughput from an iwl_scale_tbl_info
401 * that wraps a NULL pointer check
402 */
403static s32
404iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
405{
406 if (tbl->expected_tpt)
407 return tbl->expected_tpt[rs_index];
408 return 0;
409}
410
411/**
412 * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
413 *
414 * We keep a sliding window of the last 62 packets transmitted
415 * at this rate. window->data contains the bitmask of successful
416 * packets.
417 */
static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
			int scale_index, int attempts, int successes)
{
	struct iwl_rate_scale_data *window = NULL;
	/* bit of the oldest slot in the 62-entry window */
	static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
	s32 fail_count, tpt;

	/* reject out-of-range rate indices */
	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
		return -EINVAL;

	/* Select window for current tx bit rate */
	window = &(tbl->win[scale_index]);

	/* Get expected throughput */
	tpt = iwl4965_get_expected_tpt(tbl, scale_index);

	/*
	 * Keep track of only the latest 62 tx frame attempts in this rate's
	 * history window; anything older isn't really relevant any more.
	 * If we have filled up the sliding window, drop the oldest attempt;
	 * if the oldest attempt (highest bit in bitmap) shows "success",
	 * subtract "1" from the success counter (this is the main reason
	 * we keep these bitmaps!).
	 */
	while (attempts > 0) {
		if (window->counter >= IWL_RATE_MAX_WINDOW) {

			/* remove earliest */
			window->counter = IWL_RATE_MAX_WINDOW - 1;

			if (window->data & mask) {
				window->data &= ~mask;
				window->success_counter--;
			}
		}

		/* Increment frames-attempted counter */
		window->counter++;

		/* Shift bitmap by one frame to throw away oldest history */
		window->data <<= 1;

		/* Mark the most recent #successes attempts as successful */
		if (successes > 0) {
			window->success_counter++;
			window->data |= 0x1;
			successes--;
		}

		attempts--;
	}

	/* Calculate current success ratio, avoid divide-by-0! */
	/* ratio is scaled by 128 to keep fractional precision in s32 */
	if (window->counter > 0)
		window->success_ratio = 128 * (100 * window->success_counter)
					/ window->counter;
	else
		window->success_ratio = IWL_INVALID_VALUE;

	fail_count = window->counter - window->success_counter;

	/* Calculate average throughput, if we have enough history. */
	if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
	    (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
		window->average_tpt = (window->success_ratio * tpt + 64) / 128;
	else
		window->average_tpt = IWL_INVALID_VALUE;

	/* Tag this window as having been updated */
	window->stamp = jiffies;

	return 0;
}
491
492/*
493 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
494 */
/*
 * Build the uCode rate_n_flags word for rate @index of the given mode
 * table: PLCP code (legacy/SISO/MIMO2), antenna selection, and the
 * HT40/duplicate/SGI/green-field modifier bits.
 */
static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
					 struct iwl_scale_tbl_info *tbl,
					 int index, u8 use_green)
{
	u32 rate_n_flags = 0;

	if (is_legacy(tbl->lq_type)) {
		rate_n_flags = iwlegacy_rates[index].plcp;
		if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
			rate_n_flags |= RATE_MCS_CCK_MSK;

	} else if (is_Ht(tbl->lq_type)) {
		/* clamp an out-of-range HT index instead of failing */
		if (index > IWL_LAST_OFDM_RATE) {
			IWL_ERR(priv, "Invalid HT rate index %d\n", index);
			index = IWL_LAST_OFDM_RATE;
		}
		rate_n_flags = RATE_MCS_HT_MSK;

		if (is_siso(tbl->lq_type))
			rate_n_flags |=	iwlegacy_rates[index].plcp_siso;
		else
			rate_n_flags |=	iwlegacy_rates[index].plcp_mimo2;
	} else {
		IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
	}

	rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
						     RATE_MCS_ANT_ABC_MSK);

	if (is_Ht(tbl->lq_type)) {
		if (tbl->is_ht40) {
			if (tbl->is_dup)
				rate_n_flags |= RATE_MCS_DUP_MSK;
			else
				rate_n_flags |= RATE_MCS_HT40_MSK;
		}
		if (tbl->is_SGI)
			rate_n_flags |= RATE_MCS_SGI_MSK;

		if (use_green) {
			rate_n_flags |= RATE_MCS_GF_MSK;
			/* GF and SGI:SISO are mutually exclusive; drop SGI */
			if (is_siso(tbl->lq_type) && tbl->is_SGI) {
				rate_n_flags &= ~RATE_MCS_SGI_MSK;
				IWL_ERR(priv, "GF was set with SGI:SISO\n");
			}
		}
	}
	return rate_n_flags;
}
544
545/*
546 * Interpret uCode API's rate_n_flags format,
547 * fill "search" or "active" tx mode table.
548 */
static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
				      enum ieee80211_band band,
				      struct iwl_scale_tbl_info *tbl,
				      int *rate_idx)
{
	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
	u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
	u8 mcs;

	/* start from a clean legacy default and fill in from the flags */
	memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
	*rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);

	if (*rate_idx == IWL_RATE_INVALID) {
		*rate_idx = -1;
		return -EINVAL;
	}
	tbl->is_SGI = 0;	/* default legacy setup */
	tbl->is_ht40 = 0;
	tbl->is_dup = 0;
	tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
	tbl->lq_type = LQ_NONE;
	tbl->max_search = IWL_MAX_SEARCH;

	/* legacy rate format */
	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
		if (iwl4965_num_of_ant == 1) {
			if (band == IEEE80211_BAND_5GHZ)
				tbl->lq_type = LQ_A;
			else
				tbl->lq_type = LQ_G;
		}
		/* HT rate format */
	} else {
		if (rate_n_flags & RATE_MCS_SGI_MSK)
			tbl->is_SGI = 1;

		/* duplicate mode implies a 40 MHz channel as well */
		if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
		    (rate_n_flags & RATE_MCS_DUP_MSK))
			tbl->is_ht40 = 1;

		if (rate_n_flags & RATE_MCS_DUP_MSK)
			tbl->is_dup = 1;

		mcs = iwl4965_rs_extract_rate(rate_n_flags);

		/* SISO */
		if (mcs <= IWL_RATE_SISO_60M_PLCP) {
			if (iwl4965_num_of_ant == 1)
				tbl->lq_type = LQ_SISO; /*else NONE*/
			/* MIMO2 */
		} else {
			if (iwl4965_num_of_ant == 2)
				tbl->lq_type = LQ_MIMO2;
		}
	}
	return 0;
}
606
607/* switch to another antenna/antennas and return 1 */
608/* if no other valid antenna found, return 0 */
609static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
610 struct iwl_scale_tbl_info *tbl)
611{
612 u8 new_ant_type;
613
614 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
615 return 0;
616
617 if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
618 return 0;
619
620 new_ant_type = ant_toggle_lookup[tbl->ant_type];
621
622 while ((new_ant_type != tbl->ant_type) &&
623 !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
624 new_ant_type = ant_toggle_lookup[new_ant_type];
625
626 if (new_ant_type == tbl->ant_type)
627 return 0;
628
629 tbl->ant_type = new_ant_type;
630 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
631 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
632 return 1;
633}
634
635/**
636 * Green-field mode is valid if the station supports it and
637 * there are no non-GF stations present in the BSS.
638 */
static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
{
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_rxon_context *ctx = sta_priv->common.ctx;

	/* GF needs both the peer's capability and an all-GF BSS */
	return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
		!(ctx->ht.non_gf_sta_present);
}
647
648/**
649 * iwl4965_rs_get_supported_rates - get the available rates
650 *
651 * if management frame or broadcast frame only return
652 * basic available rates.
653 *
654 */
655static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
656 struct ieee80211_hdr *hdr,
657 enum iwl_table_type rate_type)
658{
659 if (is_legacy(rate_type)) {
660 return lq_sta->active_legacy_rate;
661 } else {
662 if (is_siso(rate_type))
663 return lq_sta->active_siso_rate;
664 else
665 return lq_sta->active_mimo2_rate;
666 }
667}
668
669static u16
670iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
671 int rate_type)
672{
673 u8 high = IWL_RATE_INVALID;
674 u8 low = IWL_RATE_INVALID;
675
676 /* 802.11A or ht walks to the next literal adjacent rate in
677 * the rate table */
678 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
679 int i;
680 u32 mask;
681
682 /* Find the previous rate that is in the rate mask */
683 i = index - 1;
684 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
685 if (rate_mask & mask) {
686 low = i;
687 break;
688 }
689 }
690
691 /* Find the next rate that is in the rate mask */
692 i = index + 1;
693 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
694 if (rate_mask & mask) {
695 high = i;
696 break;
697 }
698 }
699
700 return (high << 8) | low;
701 }
702
703 low = index;
704 while (low != IWL_RATE_INVALID) {
705 low = iwlegacy_rates[low].prev_rs;
706 if (low == IWL_RATE_INVALID)
707 break;
708 if (rate_mask & (1 << low))
709 break;
710 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
711 }
712
713 high = index;
714 while (high != IWL_RATE_INVALID) {
715 high = iwlegacy_rates[high].next_rs;
716 if (high == IWL_RATE_INVALID)
717 break;
718 if (rate_mask & (1 << high))
719 break;
720 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
721 }
722
723 return (high << 8) | low;
724}
725
/*
 * iwl4965_rs_get_lower_rate - choose the next lower rate for @tbl
 *
 * Returns rate_n_flags for the rate below @scale_index. When HT is no
 * longer possible (or we are already at the lowest HT index), @tbl is
 * first converted in place to the matching legacy mode (LQ_A or LQ_G,
 * single antenna, no HT40/SGI) before the lower rate is searched.
 */
static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
				struct iwl_scale_tbl_info *tbl,
				u8 scale_index, u8 ht_possible)
{
	s32 low;
	u16 rate_mask;
	u16 high_low;
	u8 switch_to_legacy = 0;
	u8 is_green = lq_sta->is_green;
	struct iwl_priv *priv = lq_sta->drv;

	/* check if we need to switch from HT to legacy rates.
	 * assumption is that mandatory rates (1Mbps or 6Mbps)
	 * are always supported (spec demand) */
	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
		switch_to_legacy = 1;
		/* Map the HT index onto its legacy equivalent */
		scale_index = rs_ht_to_legacy[scale_index];
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			tbl->lq_type = LQ_A;
		else
			tbl->lq_type = LQ_G;

		/* Legacy transmits on a single antenna */
		if (iwl4965_num_of_ant(tbl->ant_type) > 1)
			tbl->ant_type =
			    iwl4965_first_antenna(priv->hw_params.valid_tx_ant);

		tbl->is_ht40 = 0;
		tbl->is_SGI = 0;
		tbl->max_search = IWL_MAX_SEARCH;
	}

	rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);

	/* Mask with station rate restriction */
	if (is_legacy(tbl->lq_type)) {
		/* supp_rates has no CCK bits in A mode */
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			rate_mask = (u16)(rate_mask &
			   (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
		else
			rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
	}

	/* If we switched from HT to legacy, check current rate */
	if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
		low = scale_index;
		goto out;
	}

	high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
					scale_index, rate_mask,
					tbl->lq_type);
	/* low byte of high_low holds the lower adjacent rate */
	low = high_low & 0xff;

	/* No lower rate available; keep the current index */
	if (low == IWL_RATE_INVALID)
		low = scale_index;

out:
	return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
}
786
787/*
788 * Simple function to compare two rate scale table types
789 */
790static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
791 struct iwl_scale_tbl_info *b)
792{
793 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
794 (a->is_SGI == b->is_SGI);
795}
796
/*
 * mac80211 sends us Tx status
 *
 * Validate the reported initial rate against our latest Link Quality
 * command, then fold the success/failure information into the matching
 * rate-scale window(s) and kick the rate-scale algorithm.
 */
static void
iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
		     struct ieee80211_sta *sta, void *priv_sta,
		     struct sk_buff *skb)
{
	int legacy_success;
	int retries;
	int rs_index, mac_index, i;
	struct iwl_lq_sta *lq_sta = priv_sta;
	struct iwl_link_quality_cmd *table;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_priv *priv = (struct iwl_priv *)priv_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	enum mac80211_rate_control_flags mac_flags;
	u32 tx_rate;
	struct iwl_scale_tbl_info tbl_type;
	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_rxon_context *ctx = sta_priv->common.ctx;

	IWL_DEBUG_RATE_LIMIT(priv,
		"get frame ack response, update rate scale window\n");

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (!lq_sta) {
		IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
		return;
	} else if (!lq_sta->drv) {
		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
		return;
	}

	/* Only ACKed data frames carry useful rate feedback */
	if (!ieee80211_is_data(hdr->frame_control) ||
			info->flags & IEEE80211_TX_CTL_NO_ACK)
		return;

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	/*
	 * Ignore this Tx frame response if its initial rate doesn't match
	 * that of latest Link Quality command.  There may be stragglers
	 * from a previous Link Quality command, but we're no longer interested
	 * in those; they're either from the "active" mode while we're trying
	 * to check "search" mode, or a prior "search" mode after we've moved
	 * to a new "search" mode (which might become the new "active" mode).
	 */
	table = &lq_sta->lq;
	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
	iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
			 priv->band, &tbl_type, &rs_index);
	/* OFDM table indices start after the CCK-only slots */
	if (priv->band == IEEE80211_BAND_5GHZ)
		rs_index -= IWL_FIRST_OFDM_RATE;
	mac_flags = info->status.rates[0].flags;
	mac_index = info->status.rates[0].idx;
	/* For HT packets, map MCS to PLCP */
	if (mac_flags & IEEE80211_TX_RC_MCS) {
		mac_index &= RATE_MCS_CODE_MSK;	/* Remove # of streams */
		/* HT has no 9M rate; skip its slot in the driver table */
		if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
			mac_index++;
		/*
		 * mac80211 HT index is always zero-indexed; we need to move
		 * HT OFDM rates after CCK rates in 2.4 GHz band
		 */
		if (priv->band == IEEE80211_BAND_2GHZ)
			mac_index += IWL_FIRST_OFDM_RATE;
	}
	/* Here we actually compare this rate to the latest LQ command */
	if ((mac_index < 0) ||
	    (tbl_type.is_SGI !=
			!!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
	    (tbl_type.is_ht40 !=
			!!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
	    (tbl_type.is_dup !=
			!!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
	    (tbl_type.ant_type != info->antenna_sel_tx) ||
	    (!!(tx_rate & RATE_MCS_HT_MSK) !=
			!!(mac_flags & IEEE80211_TX_RC_MCS)) ||
	    (!!(tx_rate & RATE_MCS_GF_MSK) !=
			!!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
	    (rs_index != mac_index)) {
		IWL_DEBUG_RATE(priv,
		"initial rate %d does not match %d (0x%x)\n",
			 mac_index, rs_index, tx_rate);
		/*
		 * Since rates mis-match, the last LQ command may have failed.
		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
		 * ... driver.
		 */
		lq_sta->missed_rate_counter++;
		if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
			lq_sta->missed_rate_counter = 0;
			iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
						CMD_ASYNC, false);
		}
		/* Regardless, ignore this status info for outdated rate */
		return;
	} else
		/* Rate did match, so reset the missed_rate_counter */
		lq_sta->missed_rate_counter = 0;

	/* Figure out if rate scale algorithm is in active or search table */
	if (iwl4965_table_type_matches(&tbl_type,
				&(lq_sta->lq_info[lq_sta->active_tbl]))) {
		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
	} else if (iwl4965_table_type_matches(&tbl_type,
				&lq_sta->lq_info[1 - lq_sta->active_tbl])) {
		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	} else {
		IWL_DEBUG_RATE(priv,
			"Neither active nor search matches tx rate\n");
		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
		IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
			tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
		/*
		 * no matching table found, let's by-pass the data collection
		 * and continue to perform rate scale to find the rate table
		 */
		iwl4965_rs_stay_in_table(lq_sta, true);
		goto done;
	}

	/*
	 * Updating the frame history depends on whether packets were
	 * aggregated.
	 *
	 * For aggregation, all packets were transmitted at the same rate, the
	 * first index into rate scale table.
	 */
	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
		iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
				&rs_index);
		iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
				   info->status.ampdu_len,
				   info->status.ampdu_ack_len);

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->stay_in_tbl) {
			lq_sta->total_success += info->status.ampdu_ack_len;
			lq_sta->total_failed += (info->status.ampdu_len -
					info->status.ampdu_ack_len);
		}
	} else {
	/*
	 * For legacy, update frame history with for each Tx retry.
	 */
		retries = info->status.rates[0].count - 1;
		/* HW doesn't send more than 15 retries */
		retries = min(retries, 15);

		/* The last transmission may have been successful */
		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
		/* Collect data for each rate used during failed TX attempts */
		for (i = 0; i <= retries; ++i) {
			tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
			iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
					&tbl_type, &rs_index);
			/*
			 * Only collect stats if retried rate is in the same RS
			 * table as active/search.
			 */
			if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
				tmp_tbl = curr_tbl;
			else if (iwl4965_table_type_matches(&tbl_type,
								other_tbl))
				tmp_tbl = other_tbl;
			else
				continue;
			/* Only the final attempt can count as a success */
			iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
					   i < retries ? 0 : legacy_success);
		}

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->stay_in_tbl) {
			lq_sta->total_success += legacy_success;
			lq_sta->total_failed += retries + (1 - legacy_success);
		}
	}
	/* The last TX rate is cached in lq_sta; it's set in if/else above */
	lq_sta->last_rate_n_flags = tx_rate;
done:
	/* See if there's a better rate or modulation mode to try. */
	if (sta && sta->supp_rates[sband->band])
		iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
}
995
996/*
997 * Begin a period of staying with a selected modulation mode.
998 * Set "stay_in_tbl" flag to prevent any mode switches.
999 * Set frame tx success limits according to legacy vs. high-throughput,
1000 * and reset overall (spanning all rates) tx success history statistics.
1001 * These control how long we stay using same modulation mode before
1002 * searching for a new mode.
1003 */
1004static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1005 struct iwl_lq_sta *lq_sta)
1006{
1007 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
1008 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1009 if (is_legacy) {
1010 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1011 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1012 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1013 } else {
1014 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1015 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1016 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1017 }
1018 lq_sta->table_count = 0;
1019 lq_sta->total_failed = 0;
1020 lq_sta->total_success = 0;
1021 lq_sta->flush_timer = jiffies;
1022 lq_sta->action_counter = 0;
1023}
1024
1025/*
1026 * Find correct throughput table for given mode of modulation
1027 */
1028static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1029 struct iwl_scale_tbl_info *tbl)
1030{
1031 /* Used to choose among HT tables */
1032 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1033
1034 /* Check for invalid LQ type */
1035 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1036 tbl->expected_tpt = expected_tpt_legacy;
1037 return;
1038 }
1039
1040 /* Legacy rates have only one table */
1041 if (is_legacy(tbl->lq_type)) {
1042 tbl->expected_tpt = expected_tpt_legacy;
1043 return;
1044 }
1045
1046 /* Choose among many HT tables depending on number of streams
1047 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1048 * status */
1049 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1050 ht_tbl_pointer = expected_tpt_siso20MHz;
1051 else if (is_siso(tbl->lq_type))
1052 ht_tbl_pointer = expected_tpt_siso40MHz;
1053 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1054 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1055 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1056 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1057
1058 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1059 tbl->expected_tpt = ht_tbl_pointer[0];
1060 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1061 tbl->expected_tpt = ht_tbl_pointer[1];
1062 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1063 tbl->expected_tpt = ht_tbl_pointer[2];
1064 else /* AGG+SGI */
1065 tbl->expected_tpt = ht_tbl_pointer[3];
1066}
1067
/*
 * Find starting rate for new "search" high-throughput mode of modulation.
 * Goal is to find lowest expected rate (under perfect conditions) that is
 * above the current measured throughput of "active" mode, to give new mode
 * a fair chance to prove itself without too many challenges.
 *
 * This gets called when transitioning to more aggressive modulation
 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
 * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
 * to decrease to match "active" throughput.  When moving from MIMO to SISO,
 * bit rate will typically need to increase, but not if performance was bad.
 *
 * Returns the chosen rate index, or IWL_RATE_INVALID if none qualifies.
 */
static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
			   struct iwl_lq_sta *lq_sta,
			   struct iwl_scale_tbl_info *tbl,	/* "search" */
			   u16 rate_mask, s8 index)
{
	/* "active" values */
	struct iwl_scale_tbl_info *active_tbl =
	    &(lq_sta->lq_info[lq_sta->active_tbl]);
	s32 active_sr = active_tbl->win[index].success_ratio;
	s32 active_tpt = active_tbl->expected_tpt[index];

	/* expected "search" throughput */
	s32 *tpt_tbl = tbl->expected_tpt;

	s32 new_rate, high, low, start_hi;
	u16 high_low;
	s8 rate = index;

	new_rate = high = low = start_hi = IWL_RATE_INVALID;

	/* Walk up/down the rate table until the best candidate is
	 * bracketed; start_hi/new_rate remember the direction already
	 * tried so the loop terminates on the first reversal. */
	for (; ;) {
		high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
						tbl->lq_type);

		low = high_low & 0xff;
		high = (high_low >> 8) & 0xff;

		/*
		 * Lower the "search" bit rate, to give new "search" mode
		 * approximately the same throughput as "active" if:
		 *
		 * 1) "Active" mode has been working modestly well (but not
		 *    great), and expected "search" throughput (under perfect
		 *    conditions) at candidate rate is above the actual
		 *    measured "active" throughput (but less than expected
		 *    "active" throughput under perfect conditions).
		 * OR
		 * 2) "Active" mode has been working perfectly or very well
		 *    and expected "search" throughput (under perfect
		 *    conditions) at candidate rate is above expected
		 *    "active" throughput (under perfect conditions).
		 */
		if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
		     ((active_sr > IWL_RATE_DECREASE_TH) &&
		      (active_sr <= IWL_RATE_HIGH_TH) &&
		      (tpt_tbl[rate] <= active_tpt))) ||
		    ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
		     (tpt_tbl[rate] > active_tpt))) {

			/* (2nd or later pass)
			 * If we've already tried to raise the rate, and are
			 * now trying to lower it, use the higher rate. */
			if (start_hi != IWL_RATE_INVALID) {
				new_rate = start_hi;
				break;
			}

			new_rate = rate;

			/* Loop again with lower rate */
			if (low != IWL_RATE_INVALID)
				rate = low;

			/* Lower rate not available, use the original */
			else
				break;

		/* Else try to raise the "search" rate to match "active" */
		} else {
			/* (2nd or later pass)
			 * If we've already tried to lower the rate, and are
			 * now trying to raise it, use the lower rate. */
			if (new_rate != IWL_RATE_INVALID)
				break;

			/* Loop again with higher rate */
			else if (high != IWL_RATE_INVALID) {
				start_hi = high;
				rate = high;

			/* Higher rate not available, use the original */
			} else {
				new_rate = rate;
				break;
			}
		}
	}

	return new_rate;
}
1170
1171/*
1172 * Set up search table for MIMO2
1173 */
1174static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
1175 struct iwl_lq_sta *lq_sta,
1176 struct ieee80211_conf *conf,
1177 struct ieee80211_sta *sta,
1178 struct iwl_scale_tbl_info *tbl, int index)
1179{
1180 u16 rate_mask;
1181 s32 rate;
1182 s8 is_green = lq_sta->is_green;
1183 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1184 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1185
1186 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1187 return -1;
1188
1189 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1190 == WLAN_HT_CAP_SM_PS_STATIC)
1191 return -1;
1192
1193 /* Need both Tx chains/antennas to support MIMO */
1194 if (priv->hw_params.tx_chains_num < 2)
1195 return -1;
1196
1197 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1198
1199 tbl->lq_type = LQ_MIMO2;
1200 tbl->is_dup = lq_sta->is_dup;
1201 tbl->action = 0;
1202 tbl->max_search = IWL_MAX_SEARCH;
1203 rate_mask = lq_sta->active_mimo2_rate;
1204
1205 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1206 tbl->is_ht40 = 1;
1207 else
1208 tbl->is_ht40 = 0;
1209
1210 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1211
1212 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1213
1214 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
1215 rate, rate_mask);
1216 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1217 IWL_DEBUG_RATE(priv,
1218 "Can't switch with index %d rate mask %x\n",
1219 rate, rate_mask);
1220 return -1;
1221 }
1222 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1223 tbl, rate, is_green);
1224
1225 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1226 tbl->current_rate, is_green);
1227 return 0;
1228}
1229
1230/*
1231 * Set up search table for SISO
1232 */
1233static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
1234 struct iwl_lq_sta *lq_sta,
1235 struct ieee80211_conf *conf,
1236 struct ieee80211_sta *sta,
1237 struct iwl_scale_tbl_info *tbl, int index)
1238{
1239 u16 rate_mask;
1240 u8 is_green = lq_sta->is_green;
1241 s32 rate;
1242 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1243 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1244
1245 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1246 return -1;
1247
1248 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1249
1250 tbl->is_dup = lq_sta->is_dup;
1251 tbl->lq_type = LQ_SISO;
1252 tbl->action = 0;
1253 tbl->max_search = IWL_MAX_SEARCH;
1254 rate_mask = lq_sta->active_siso_rate;
1255
1256 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1257 tbl->is_ht40 = 1;
1258 else
1259 tbl->is_ht40 = 0;
1260
1261 if (is_green)
1262 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1263
1264 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1265 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1266
1267 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1268 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1269 IWL_DEBUG_RATE(priv,
1270 "can not switch with index %d rate mask %x\n",
1271 rate, rate_mask);
1272 return -1;
1273 }
1274 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1275 tbl, rate, is_green);
1276 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1277 tbl->current_rate, is_green);
1278 return 0;
1279}
1280
/*
 * Try to switch to new modulation mode from legacy
 *
 * Cycles through the legacy "actions" (toggle antenna, switch to SISO,
 * switch to MIMO2) until one produces a usable search table or all have
 * been tried.  On success the search table is armed via
 * lq_sta->search_better_tbl; always returns 0.
 */
static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
				struct iwl_lq_sta *lq_sta,
				struct ieee80211_conf *conf,
				struct ieee80211_sta *sta,
				int index)
{
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct iwl_scale_tbl_info *search_tbl =
				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct iwl_rate_scale_data *window = &(tbl->win[index]);
	/* copy everything except the per-rate history windows */
	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;
	int ret = 0;
	u8 update_search_tbl_counter = 0;

	/* NOTE(review): the action cycle is unconditionally restarted at
	 * SISO here, so the antenna-toggle cases below are only reached
	 * after SISO/MIMO2 attempts fail — confirm this is intended. */
	tbl->action = IWL_LEGACY_SWITCH_SISO;

	start_action = tbl->action;
	for (; ;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IWL_LEGACY_SWITCH_ANTENNA1:
		case IWL_LEGACY_SWITCH_ANTENNA2:
			IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");

			/* Skip if the hardware lacks the extra chain */
			if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
							tx_chains_num <= 1) ||
			    (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
							tx_chains_num <= 2))
				break;

			/* Don't change antenna if success has been great */
			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
				break;

			/* Set up search table to try other antenna */
			memcpy(search_tbl, tbl, sz);

			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
				&search_tbl->current_rate, search_tbl)) {
				update_search_tbl_counter = 1;
				iwl4965_rs_set_expected_tpt_table(lq_sta,
								search_tbl);
				goto out;
			}
			break;
		case IWL_LEGACY_SWITCH_SISO:
			IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");

			/* Set up search table to try SISO */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;
			ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
						 search_tbl, index);
			if (!ret) {
				lq_sta->action_counter = 0;
				goto out;
			}

			break;
		case IWL_LEGACY_SWITCH_MIMO2_AB:
		case IWL_LEGACY_SWITCH_MIMO2_AC:
		case IWL_LEGACY_SWITCH_MIMO2_BC:
			IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");

			/* Set up search table to try MIMO */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;

			/* Pick the antenna pair for this action */
			if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
				search_tbl->ant_type = ANT_AB;
			else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
				search_tbl->ant_type = ANT_AC;
			else
				search_tbl->ant_type = ANT_BC;

			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
						search_tbl->ant_type))
				break;

			ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
						conf, sta,
						search_tbl, index);
			if (!ret) {
				lq_sta->action_counter = 0;
				goto out;
			}
			break;
		}
		/* Advance to next action, wrapping around */
		tbl->action++;
		if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
			tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;

		/* Every action has been tried once; give up */
		if (tbl->action == start_action)
			break;

	}
	search_tbl->lq_type = LQ_NONE;
	return 0;

out:
	/* Armed a new search table; resume cycle at the next action */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;
	return 0;

}
1397
/*
 * Try to switch to new modulation mode from SISO
 *
 * Cycles through the SISO "actions" (toggle antenna, switch to MIMO2
 * with each valid antenna pair, toggle short guard interval) until one
 * produces a usable search table.  Always returns 0; success is signaled
 * through lq_sta->search_better_tbl.
 */
static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
				 struct iwl_lq_sta *lq_sta,
				 struct ieee80211_conf *conf,
				 struct ieee80211_sta *sta, int index)
{
	u8 is_green = lq_sta->is_green;
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct iwl_scale_tbl_info *search_tbl =
				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct iwl_rate_scale_data *window = &(tbl->win[index]);
	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
	/* copy everything except the per-rate history windows */
	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;
	u8 update_search_tbl_counter = 0;
	int ret;

	start_action = tbl->action;

	for (;;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IWL_SISO_SWITCH_ANTENNA1:
		case IWL_SISO_SWITCH_ANTENNA2:
			IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
			/* Skip if the hardware lacks the extra chain */
			if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
						tx_chains_num <= 1) ||
			    (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
						tx_chains_num <= 2))
				break;

			/* Don't change antenna if success has been great */
			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
				break;

			memcpy(search_tbl, tbl, sz);
			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
				       &search_tbl->current_rate, search_tbl)) {
				update_search_tbl_counter = 1;
				goto out;
			}
			break;
		case IWL_SISO_SWITCH_MIMO2_AB:
		case IWL_SISO_SWITCH_MIMO2_AC:
		case IWL_SISO_SWITCH_MIMO2_BC:
			IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;

			/* Pick the antenna pair for this action */
			if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
				search_tbl->ant_type = ANT_AB;
			else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
				search_tbl->ant_type = ANT_AC;
			else
				search_tbl->ant_type = ANT_BC;

			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
						search_tbl->ant_type))
				break;

			ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
						conf, sta,
						search_tbl, index);
			if (!ret)
				goto out;
			break;
		case IWL_SISO_SWITCH_GI:
			/* SGI toggle only makes sense if the peer supports
			 * it for the current channel width */
			if (!tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_20))
				break;
			if (tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_40))
				break;

			IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");

			memcpy(search_tbl, tbl, sz);
			if (is_green) {
				/* 11n spec: no SGI in SISO+Greenfield, so
				 * SGI set here indicates inconsistent state */
				if (!tbl->is_SGI)
					break;
				else
					IWL_ERR(priv,
						"SGI was set in GF+SISO\n");
			}
			search_tbl->is_SGI = !tbl->is_SGI;
			iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
			/* Don't turn SGI off if current throughput already
			 * beats what non-SGI could deliver */
			if (tbl->is_SGI) {
				s32 tpt = lq_sta->last_tpt / 100;
				if (tpt >= search_tbl->expected_tpt[index])
					break;
			}
			search_tbl->current_rate =
				iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
						      index, is_green);
			update_search_tbl_counter = 1;
			goto out;
		}
		/* Advance to next action, wrapping around */
		tbl->action++;
		if (tbl->action > IWL_SISO_SWITCH_GI)
			tbl->action = IWL_SISO_SWITCH_ANTENNA1;

		/* Every action has been tried once; give up */
		if (tbl->action == start_action)
			break;
	}
	search_tbl->lq_type = LQ_NONE;
	return 0;

 out:
	/* Armed a new search table; resume cycle at the next action */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IWL_SISO_SWITCH_GI)
		tbl->action = IWL_SISO_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;

	return 0;
}
1519
/*
 * Try to switch to new modulation mode from MIMO2
 *
 * Cycles through the MIMO2 "actions" (toggle antennas, switch to SISO on
 * each single antenna, toggle short guard interval) until one produces a
 * usable search table.  Always returns 0; success is signaled through
 * lq_sta->search_better_tbl.
 */
static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
				 struct iwl_lq_sta *lq_sta,
				 struct ieee80211_conf *conf,
				 struct ieee80211_sta *sta, int index)
{
	s8 is_green = lq_sta->is_green;
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct iwl_scale_tbl_info *search_tbl =
				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct iwl_rate_scale_data *window = &(tbl->win[index]);
	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
	/* copy everything except the per-rate history windows */
	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;
	u8 update_search_tbl_counter = 0;
	int ret;

	start_action = tbl->action;
	for (;;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IWL_MIMO2_SWITCH_ANTENNA1:
		case IWL_MIMO2_SWITCH_ANTENNA2:
			IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");

			/* Toggling a MIMO pair needs a third chain */
			if (tx_chains_num <= 2)
				break;

			/* Don't change antennas if success has been great */
			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
				break;

			memcpy(search_tbl, tbl, sz);
			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
				       &search_tbl->current_rate, search_tbl)) {
				update_search_tbl_counter = 1;
				goto out;
			}
			break;
		case IWL_MIMO2_SWITCH_SISO_A:
		case IWL_MIMO2_SWITCH_SISO_B:
		case IWL_MIMO2_SWITCH_SISO_C:
			IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");

			/* Set up new search table for SISO */
			memcpy(search_tbl, tbl, sz);

			/* Pick the single antenna for this action */
			if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
				search_tbl->ant_type = ANT_A;
			else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
				search_tbl->ant_type = ANT_B;
			else
				search_tbl->ant_type = ANT_C;

			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
						search_tbl->ant_type))
				break;

			ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
						conf, sta,
						search_tbl, index);
			if (!ret)
				goto out;

			break;

		case IWL_MIMO2_SWITCH_GI:
			/* SGI toggle only makes sense if the peer supports
			 * it for the current channel width */
			if (!tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_20))
				break;
			if (tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_40))
				break;

			IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");

			/* Set up new search table for MIMO2 */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = !tbl->is_SGI;
			iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
			/*
			 * If active table already uses the fastest possible
			 * modulation (dual stream with short guard interval),
			 * and it's working well, there's no need to look
			 * for a better type of modulation!
			 */
			if (tbl->is_SGI) {
				s32 tpt = lq_sta->last_tpt / 100;
				if (tpt >= search_tbl->expected_tpt[index])
					break;
			}
			search_tbl->current_rate =
				iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
						      index, is_green);
			update_search_tbl_counter = 1;
			goto out;

		}
		/* Advance to next action, wrapping around */
		tbl->action++;
		if (tbl->action > IWL_MIMO2_SWITCH_GI)
			tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;

		/* Every action has been tried once; give up */
		if (tbl->action == start_action)
			break;
	}
	search_tbl->lq_type = LQ_NONE;
	return 0;
 out:
	/* Armed a new search table; resume cycle at the next action */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IWL_MIMO2_SWITCH_GI)
		tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;

	return 0;

}
1642
/*
 * Check whether we should continue using same modulation mode, or
 * begin search for a new mode, based on:
 * 1) # tx successes or failures while using this mode
 * 2) # times calling this function
 * 3) elapsed time in this mode (not used, for now)
 */
static void
iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
{
	struct iwl_scale_tbl_info *tbl;
	int i;
	int active_tbl;
	int flush_interval_passed = 0;
	struct iwl_priv *priv;

	priv = lq_sta->drv;
	active_tbl = lq_sta->active_tbl;

	tbl = &(lq_sta->lq_info[active_tbl]);

	/* If we've been disallowing search, see if we should now allow it */
	if (lq_sta->stay_in_tbl) {

		/* Elapsed time using current modulation mode */
		if (lq_sta->flush_timer)
			flush_interval_passed =
			time_after(jiffies,
					(unsigned long)(lq_sta->flush_timer +
					IWL_RATE_SCALE_FLUSH_INTVL));

		/*
		 * Check if we should allow search for new modulation mode.
		 * If many frames have failed or succeeded, or we've used
		 * this same modulation for a long time, allow search, and
		 * reset history stats that keep track of whether we should
		 * allow a new search.  Also (below) reset all bitmaps and
		 * stats in active history.
		 */
		if (force_search ||
		    (lq_sta->total_failed > lq_sta->max_failure_limit) ||
		    (lq_sta->total_success > lq_sta->max_success_limit) ||
		    ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
		     && (flush_interval_passed))) {
			IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
				     lq_sta->total_failed,
				     lq_sta->total_success,
				     flush_interval_passed);

			/* Allow search for new mode */
			lq_sta->stay_in_tbl = 0;	/* only place reset */
			lq_sta->total_failed = 0;
			lq_sta->total_success = 0;
			lq_sta->flush_timer = 0;

		/*
		 * Else if we've used this modulation mode enough repetitions
		 * (regardless of elapsed time or success/failure), reset
		 * history bitmaps and rate-specific stats for all rates in
		 * active table.
		 */
		} else {
			lq_sta->table_count++;
			if (lq_sta->table_count >=
			    lq_sta->table_count_limit) {
				lq_sta->table_count = 0;

				IWL_DEBUG_RATE(priv,
					"LQ: stay in table clear win\n");
				for (i = 0; i < IWL_RATE_COUNT; i++)
					iwl4965_rs_rate_scale_clear_window(
						&(tbl->win[i]));
			}
		}

		/* If transitioning to allow "search", reset all history
		 * bitmaps and stats in active table (this will become the new
		 * "search" table). */
		if (!lq_sta->stay_in_tbl) {
			for (i = 0; i < IWL_RATE_COUNT; i++)
				iwl4965_rs_rate_scale_clear_window(
					&(tbl->win[i]));
		}
	}
}
1728
1729/*
1730 * setup rate table in uCode
1731 * return rate_n_flags as used in the table
1732 */
1733static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
1734 struct iwl_rxon_context *ctx,
1735 struct iwl_lq_sta *lq_sta,
1736 struct iwl_scale_tbl_info *tbl,
1737 int index, u8 is_green)
1738{
1739 u32 rate;
1740
1741 /* Update uCode's rate table. */
1742 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
1743 iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
1744 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
1745
1746 return rate;
1747}
1748
1749/*
1750 * Do rate scaling and search for new modulation mode.
1751 */
static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
				struct sk_buff *skb,
				struct ieee80211_sta *sta,
				struct iwl_lq_sta *lq_sta)
{
	struct ieee80211_hw *hw = priv->hw;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int low = IWL_RATE_INVALID;
	int high = IWL_RATE_INVALID;
	int index;
	int i;
	struct iwl_rate_scale_data *window = NULL;
	int current_tpt = IWL_INVALID_VALUE;
	int low_tpt = IWL_INVALID_VALUE;
	int high_tpt = IWL_INVALID_VALUE;
	u32 fail_count;
	s8 scale_action = 0;	/* -1 = lower rate, 0 = hold, +1 = raise */
	u16 rate_mask;
	u8 update_lq = 0;	/* set when uCode's rate table must be rewritten */
	struct iwl_scale_tbl_info *tbl, *tbl1;
	u16 rate_scale_index_msk = 0;
	u32 rate;
	u8 is_green = 0;
	u8 active_tbl = 0;
	u8 done_search = 0;	/* set once a mode-search decision is final */
	u16 high_low;
	s32 sr;			/* success ratio of current rate's window */
	u8 tid = MAX_TID_COUNT;
	struct iwl_tid_data *tid_data;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_rxon_context *ctx = sta_priv->common.ctx;

	IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");

	/* Send management frames and NO_ACK data using lowest rate. */
	/* TODO: this could probably be improved.. */
	if (!ieee80211_is_data(hdr->frame_control) ||
			info->flags & IEEE80211_TX_CTL_NO_ACK)
		return;

	if (!sta || !lq_sta)
		return;

	lq_sta->supp_rates = sta->supp_rates[lq_sta->band];

	/* Track per-TID traffic and mirror the aggregation state kept in
	 * the station table into lq_sta->is_agg. */
	tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
	if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
		tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
		if (tid_data->agg.state == IWL_AGG_OFF)
			lq_sta->is_agg = 0;
		else
			lq_sta->is_agg = 1;
	} else
		lq_sta->is_agg = 0;

	/*
	 * Select rate-scale / modulation-mode table to work with in
	 * the rest of this function:  "search" if searching for better
	 * modulation mode, or "active" if doing rate scaling within a mode.
	 */
	if (!lq_sta->search_better_tbl)
		active_tbl = lq_sta->active_tbl;
	else
		active_tbl = 1 - lq_sta->active_tbl;

	tbl = &(lq_sta->lq_info[active_tbl]);
	if (is_legacy(tbl->lq_type))
		lq_sta->is_green = 0;
	else
		lq_sta->is_green = iwl4965_rs_use_green(sta);
	is_green = lq_sta->is_green;

	/* current tx rate */
	index = lq_sta->last_txrate_idx;

	IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
		       tbl->lq_type);

	/* rates available for this association, and for modulation mode */
	rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);

	IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);

	/* mask with station rate restriction */
	if (is_legacy(tbl->lq_type)) {
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			/* supp_rates has no CCK bits in A mode */
			rate_scale_index_msk = (u16) (rate_mask &
				(lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
		else
			rate_scale_index_msk = (u16) (rate_mask &
						      lq_sta->supp_rates);

	} else
		rate_scale_index_msk = rate_mask;

	/* Fall back to the unrestricted mask rather than an empty one */
	if (!rate_scale_index_msk)
		rate_scale_index_msk = rate_mask;

	if (!((1 << index) & rate_scale_index_msk)) {
		IWL_ERR(priv, "Current Rate is not valid\n");
		if (lq_sta->search_better_tbl) {
			/* revert to active table if search table is not valid*/
			tbl->lq_type = LQ_NONE;
			lq_sta->search_better_tbl = 0;
			tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
			/* get "active" rate info */
			index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
			rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
						tbl, index, is_green);
		}
		return;
	}

	/* Get expected throughput table and history window for current rate */
	if (!tbl->expected_tpt) {
		IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
		return;
	}

	/* force user max rate if set by user */
	if ((lq_sta->max_rate_idx != -1) &&
	    (lq_sta->max_rate_idx < index)) {
		index = lq_sta->max_rate_idx;
		update_lq = 1;
		window = &(tbl->win[index]);
		goto lq_update;
	}

	window = &(tbl->win[index]);

	/*
	 * If there is not enough history to calculate actual average
	 * throughput, keep analyzing results of more tx frames, without
	 * changing rate or mode (bypass most of the rest of this function).
	 * Set up new rate table in uCode only if old rate is not supported
	 * in current association (use new rate found above).
	 */
	fail_count = window->counter - window->success_counter;
	if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
			(window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
		IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
			       "for index %d\n",
			       window->success_counter, window->counter, index);

		/* Can't calculate this yet; not enough history */
		window->average_tpt = IWL_INVALID_VALUE;

		/* Should we stay with this modulation mode,
		 * or search for a new one? */
		iwl4965_rs_stay_in_table(lq_sta, false);

		goto out;
	}
	/* Else we have enough samples; calculate estimate of
	 * actual average throughput.  The +64/128 computes a rounded
	 * fixed-point product of success_ratio and expected tpt. */
	if (window->average_tpt != ((window->success_ratio *
			tbl->expected_tpt[index] + 64) / 128)) {
		IWL_ERR(priv,
			 "expected_tpt should have been calculated by now\n");
		window->average_tpt = ((window->success_ratio *
					tbl->expected_tpt[index] + 64) / 128);
	}

	/* If we are searching for better modulation mode, check success. */
	if (lq_sta->search_better_tbl) {
		/* If good success, continue using the "search" mode;
		 * no need to send new link quality command, since we're
		 * continuing to use the setup that we've been trying. */
		if (window->average_tpt > lq_sta->last_tpt) {

			IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
					"suc=%d cur-tpt=%d old-tpt=%d\n",
					window->success_ratio,
					window->average_tpt,
					lq_sta->last_tpt);

			if (!is_legacy(tbl->lq_type))
				lq_sta->enable_counter = 1;

			/* Swap tables; "search" becomes "active" */
			lq_sta->active_tbl = active_tbl;
			current_tpt = window->average_tpt;

		/* Else poor success; go back to mode in "active" table */
		} else {

			IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
					"suc=%d cur-tpt=%d old-tpt=%d\n",
					window->success_ratio,
					window->average_tpt,
					lq_sta->last_tpt);

			/* Nullify "search" table */
			tbl->lq_type = LQ_NONE;

			/* Revert to "active" table */
			active_tbl = lq_sta->active_tbl;
			tbl = &(lq_sta->lq_info[active_tbl]);

			/* Revert to "active" rate and throughput info */
			index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
			current_tpt = lq_sta->last_tpt;

			/* Need to set up a new rate table in uCode */
			update_lq = 1;
		}

		/* Either way, we've made a decision; modulation mode
		 * search is done, allow rate adjustment next time. */
		lq_sta->search_better_tbl = 0;
		done_search = 1;	/* Don't switch modes below! */
		goto lq_update;
	}

	/* (Else) not in search of better modulation mode, try for better
	 * starting rate, while staying in this mode. */
	high_low = iwl4965_rs_get_adjacent_rate(priv, index,
					rate_scale_index_msk,
					tbl->lq_type);
	low = high_low & 0xff;
	high = (high_low >> 8) & 0xff;

	/* If user set max rate, dont allow higher than user constrain */
	if ((lq_sta->max_rate_idx != -1) &&
	    (lq_sta->max_rate_idx < high))
		high = IWL_RATE_INVALID;

	sr = window->success_ratio;

	/* Collect measured throughputs for current and adjacent rates */
	current_tpt = window->average_tpt;
	if (low != IWL_RATE_INVALID)
		low_tpt = tbl->win[low].average_tpt;
	if (high != IWL_RATE_INVALID)
		high_tpt = tbl->win[high].average_tpt;

	scale_action = 0;

	/* Too many failures, decrease rate */
	if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
		IWL_DEBUG_RATE(priv,
			"decrease rate because of low success_ratio\n");
		scale_action = -1;

	/* No throughput measured yet for adjacent rates; try increase. */
	} else if ((low_tpt == IWL_INVALID_VALUE) &&
		   (high_tpt == IWL_INVALID_VALUE)) {

		if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
			scale_action = 1;
		else if (low != IWL_RATE_INVALID)
			scale_action = 0;
	}

	/* Both adjacent throughputs are measured, but neither one has better
	 * throughput; we're using the best rate, don't change it! */
	else if ((low_tpt != IWL_INVALID_VALUE) &&
		 (high_tpt != IWL_INVALID_VALUE) &&
		 (low_tpt < current_tpt) &&
		 (high_tpt < current_tpt))
		scale_action = 0;

	/* At least one adjacent rate's throughput is measured,
	 * and may have better performance. */
	else {
		/* Higher adjacent rate's throughput is measured */
		if (high_tpt != IWL_INVALID_VALUE) {
			/* Higher rate has better throughput */
			if (high_tpt > current_tpt &&
					sr >= IWL_RATE_INCREASE_TH) {
				scale_action = 1;
			} else {
				scale_action = 0;
			}

		/* Lower adjacent rate's throughput is measured */
		} else if (low_tpt != IWL_INVALID_VALUE) {
			/* Lower rate has better throughput */
			if (low_tpt > current_tpt) {
				IWL_DEBUG_RATE(priv,
				    "decrease rate because of low tpt\n");
				scale_action = -1;
			} else if (sr >= IWL_RATE_INCREASE_TH) {
				scale_action = 1;
			}
		}
	}

	/* Sanity check; asked for decrease, but success rate or throughput
	 * has been good at old rate.  Don't change it. */
	if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
		    ((sr > IWL_RATE_HIGH_TH) ||
		     (current_tpt > (100 * tbl->expected_tpt[low]))))
		scale_action = 0;

	switch (scale_action) {
	case -1:
		/* Decrease starting rate, update uCode's rate table */
		if (low != IWL_RATE_INVALID) {
			update_lq = 1;
			index = low;
		}

		break;
	case 1:
		/* Increase starting rate, update uCode's rate table */
		if (high != IWL_RATE_INVALID) {
			update_lq = 1;
			index = high;
		}

		break;
	case 0:
		/* No change */
	default:
		break;
	}

	IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
		    "high %d type %d\n",
		     index, scale_action, low, high, tbl->lq_type);

lq_update:
	/* Replace uCode's rate table for the destination station. */
	if (update_lq)
		rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
					tbl, index, is_green);

	/* Should we stay with this modulation mode,
	 * or search for a new one? */
	iwl4965_rs_stay_in_table(lq_sta, false);

	/*
	 * Search for new modulation mode if we're:
	 * 1)  Not changing rates right now
	 * 2)  Not just finishing up a search
	 * 3)  Allowing a new search
	 */
	if (!update_lq && !done_search &&
		!lq_sta->stay_in_tbl && window->counter) {
		/* Save current throughput to compare with "search" throughput*/
		lq_sta->last_tpt = current_tpt;

		/* Select a new "search" modulation mode to try.
		 * If one is found, set up the new "search" table. */
		if (is_legacy(tbl->lq_type))
			iwl4965_rs_move_legacy_other(priv, lq_sta,
							conf, sta, index);
		else if (is_siso(tbl->lq_type))
			iwl4965_rs_move_siso_to_other(priv, lq_sta,
							conf, sta, index);
		else /* (is_mimo2(tbl->lq_type)) */
			iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
							conf, sta, index);

		/* If new "search" mode was selected, set up in uCode table */
		if (lq_sta->search_better_tbl) {
			/* Access the "search" table, clear its history. */
			tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
			for (i = 0; i < IWL_RATE_COUNT; i++)
				iwl4965_rs_rate_scale_clear_window(
							&(tbl->win[i]));

			/* Use new "search" start rate */
			index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);

			IWL_DEBUG_RATE(priv,
				"Switch current  mcs: %X index: %d\n",
				     tbl->current_rate, index);
			iwl4965_rs_fill_link_cmd(priv, lq_sta,
						tbl->current_rate);
			iwl_legacy_send_lq_cmd(priv, ctx,
						&lq_sta->lq, CMD_ASYNC, false);
		} else
			done_search = 1;
	}

	if (done_search && !lq_sta->stay_in_tbl) {
		/* If the "active" (non-search) mode was legacy,
		 * and we've tried switching antennas,
		 * but we haven't been able to try HT modes (not available),
		 * stay with best antenna legacy modulation for a while
		 * before next round of mode comparisons. */
		tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
		if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
		    lq_sta->action_counter > tbl1->max_search) {
			IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
			iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
		}

		/* If we're in an HT mode, and all 3 mode switch actions
		 * have been tried and compared, stay in this best modulation
		 * mode for a while before next round of mode comparisons. */
		if (lq_sta->enable_counter &&
		    (lq_sta->action_counter >= tbl1->max_search)) {
			if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
			    (lq_sta->tx_agg_tid_en & (1 << tid)) &&
			    (tid != MAX_TID_COUNT)) {
				tid_data =
				   &priv->stations[lq_sta->lq.sta_id].tid[tid];
				if (tid_data->agg.state == IWL_AGG_OFF) {
					IWL_DEBUG_RATE(priv,
						       "try to aggregate tid %d\n",
						       tid);
					iwl4965_rs_tl_turn_on_agg(priv, tid,
							  lq_sta, sta);
				}
			}
			iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
		}
	}

out:
	/* Record the rate actually chosen for the next invocation */
	tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
							index, is_green);
	i = index;
	lq_sta->last_txrate_idx = i;
}
2173
2174/**
2175 * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
2176 *
2177 * The uCode's station table contains a table of fallback rates
2178 * for automatic fallback during transmission.
2179 *
2180 * NOTE: This sets up a default set of values. These will be replaced later
2181 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2182 * rc80211_simple.
2183 *
2184 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2185 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2186 * which requires station table entry to exist).
2187 */
static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
			      struct ieee80211_conf *conf,
			      struct ieee80211_sta *sta,
			      struct iwl_lq_sta *lq_sta)
{
	struct iwl_scale_tbl_info *tbl;
	int rate_idx;
	int i;
	u32 rate;
	u8 use_green = iwl4965_rs_use_green(sta);
	u8 active_tbl = 0;
	u8 valid_tx_ant;
	struct iwl_station_priv *sta_priv;
	struct iwl_rxon_context *ctx;

	if (!sta || !lq_sta)
		return;

	sta_priv = (void *)sta->drv_priv;
	ctx = sta_priv->common.ctx;

	/* Start from the station's last tx rate index (clamped below) */
	i = lq_sta->last_txrate_idx;

	valid_tx_ant = priv->hw_params.valid_tx_ant;

	/* Work on the "search" table if a mode search is in progress,
	 * else the "active" table. */
	if (!lq_sta->search_better_tbl)
		active_tbl = lq_sta->active_tbl;
	else
		active_tbl = 1 - lq_sta->active_tbl;

	tbl = &(lq_sta->lq_info[active_tbl]);

	if ((i < 0) || (i >= IWL_RATE_COUNT))
		i = 0;

	/* Build initial rate_n_flags: PLCP value plus first valid antenna */
	rate = iwlegacy_rates[i].plcp;
	tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
	rate |= tbl->ant_type << RATE_MCS_ANT_POS;

	if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
		rate |= RATE_MCS_CCK_MSK;

	iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
	if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
		iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);

	rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
	tbl->current_rate = rate;
	iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
	/* priv is deliberately NULL here: iwl4965_rs_fill_link_cmd only
	 * performs antenna toggling when priv is non-NULL, so the initial
	 * table is filled without toggling. */
	iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
	priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
	iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
}
2241
/*
 * mac80211 rate-control "get_rate" hook: report the rate chosen by the
 * scaling algorithm (lq_sta->last_txrate_idx) back to mac80211 in
 * info->control.rates[0], translating HT rate_n_flags bits into
 * IEEE80211_TX_RC_* flags.
 */
static void
iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
			struct ieee80211_tx_rate_control *txrc)
{

	struct sk_buff *skb = txrc->skb;
	struct ieee80211_supported_band *sband = txrc->sband;
	struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_lq_sta *lq_sta = priv_sta;
	int rate_idx;

	IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");

	/* Get max rate if user set max rate */
	if (lq_sta) {
		lq_sta->max_rate_idx = txrc->max_rate_idx;
		/* On 5 GHz the mac80211 index is OFDM-relative; shift to
		 * the driver's absolute rate index space. */
		if ((sband->band == IEEE80211_BAND_5GHZ) &&
		    (lq_sta->max_rate_idx != -1))
			lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
		if ((lq_sta->max_rate_idx < 0) ||
		    (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
			lq_sta->max_rate_idx = -1;
	}

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (lq_sta && !lq_sta->drv) {
		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
		priv_sta = NULL;
	}

	/* Send management frames and NO_ACK data using lowest rate. */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	if (!lq_sta)
		return;

	rate_idx = lq_sta->last_txrate_idx;

	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
		/* Convert driver rate index to an MCS index */
		rate_idx -= IWL_FIRST_OFDM_RATE;
		/* 6M and 9M shared same MCS index */
		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
		if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
							IWL_RATE_MIMO2_6M_PLCP)
			rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
		/* Mirror HT rate_n_flags bits into mac80211 TX flags */
		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_DUP_DATA;
		if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_40_MHZ_WIDTH;
		if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_GREEN_FIELD;
	} else {
		/* Check for invalid rates */
		if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
				((sband->band == IEEE80211_BAND_5GHZ) &&
				(rate_idx < IWL_FIRST_OFDM_RATE)))
			rate_idx = rate_lowest_index(sband, sta);
		/* On valid 5 GHz rate, adjust index */
		else if (sband->band == IEEE80211_BAND_5GHZ)
			rate_idx -= IWL_FIRST_OFDM_RATE;
		info->control.rates[0].flags = 0;
	}
	info->control.rates[0].idx = rate_idx;

}
2316
2317static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2318 gfp_t gfp)
2319{
2320 struct iwl_lq_sta *lq_sta;
2321 struct iwl_station_priv *sta_priv =
2322 (struct iwl_station_priv *) sta->drv_priv;
2323 struct iwl_priv *priv;
2324
2325 priv = (struct iwl_priv *)priv_rate;
2326 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2327
2328 lq_sta = &sta_priv->lq_sta;
2329
2330 return lq_sta;
2331}
2332
2333/*
2334 * Called after adding a new station to initialize rate scaling
2335 */
2336void
2337iwl4965_rs_rate_init(struct iwl_priv *priv,
2338 struct ieee80211_sta *sta,
2339 u8 sta_id)
2340{
2341 int i, j;
2342 struct ieee80211_hw *hw = priv->hw;
2343 struct ieee80211_conf *conf = &priv->hw->conf;
2344 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2345 struct iwl_station_priv *sta_priv;
2346 struct iwl_lq_sta *lq_sta;
2347 struct ieee80211_supported_band *sband;
2348
2349 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2350 lq_sta = &sta_priv->lq_sta;
2351 sband = hw->wiphy->bands[conf->channel->band];
2352
2353
2354 lq_sta->lq.sta_id = sta_id;
2355
2356 for (j = 0; j < LQ_SIZE; j++)
2357 for (i = 0; i < IWL_RATE_COUNT; i++)
2358 iwl4965_rs_rate_scale_clear_window(
2359 &lq_sta->lq_info[j].win[i]);
2360
2361 lq_sta->flush_timer = 0;
2362 lq_sta->supp_rates = sta->supp_rates[sband->band];
2363 for (j = 0; j < LQ_SIZE; j++)
2364 for (i = 0; i < IWL_RATE_COUNT; i++)
2365 iwl4965_rs_rate_scale_clear_window(
2366 &lq_sta->lq_info[j].win[i]);
2367
2368 IWL_DEBUG_RATE(priv, "LQ:"
2369 "*** rate scale station global init for station %d ***\n",
2370 sta_id);
2371 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2372 * the lowest or the highest rate.. Could consider using RSSI from
2373 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2374 * after assoc.. */
2375
2376 lq_sta->is_dup = 0;
2377 lq_sta->max_rate_idx = -1;
2378 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2379 lq_sta->is_green = iwl4965_rs_use_green(sta);
2380 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2381 lq_sta->band = priv->band;
2382 /*
2383 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2384 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2385 */
2386 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2387 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2388 lq_sta->active_siso_rate &= ~((u16)0x2);
2389 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2390
2391 /* Same here */
2392 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2393 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2394 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2395 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2396
2397 /* These values will be overridden later */
2398 lq_sta->lq.general_params.single_stream_ant_msk =
2399 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2400 lq_sta->lq.general_params.dual_stream_ant_msk =
2401 priv->hw_params.valid_tx_ant &
2402 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2403 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2404 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2405 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2406 lq_sta->lq.general_params.dual_stream_ant_msk =
2407 priv->hw_params.valid_tx_ant;
2408 }
2409
2410 /* as default allow aggregation for all tids */
2411 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2412 lq_sta->drv = priv;
2413
2414 /* Set last_txrate_idx to lowest rate */
2415 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2416 if (sband->band == IEEE80211_BAND_5GHZ)
2417 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2418 lq_sta->is_agg = 0;
2419
2420#ifdef CONFIG_MAC80211_DEBUGFS
2421 lq_sta->dbg_fixed_rate = 0;
2422#endif
2423
2424 iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
2425}
2426
/*
 * Fill the link-quality command's fallback rate table, starting from
 * new_rate at entry 0 and stepping down through lower rates, repeating
 * each rate IWL_NUMBER_TRY (legacy) or IWL_HT_NUMBER_TRY (HT) times
 * until all LINK_QUAL_MAX_RETRY_NUM entries are filled.
 *
 * NOTE: priv may be NULL (see iwl4965_rs_initialize_lq); all uses of
 * priv below are guarded, which disables antenna toggling in that case.
 */
static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
			     struct iwl_lq_sta *lq_sta, u32 new_rate)
{
	struct iwl_scale_tbl_info tbl_type;
	int index = 0;
	int rate_idx;
	int repeat_rate = 0;
	u8 ant_toggle_cnt = 0;
	u8 use_ht_possible = 1;
	u8 valid_tx_ant = 0;
	struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;

	/* Override starting rate (index 0) if needed for debug purposes */
	iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

	/* Interpret new_rate (rate_n_flags) */
	iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
					  &tbl_type, &rate_idx);

	/* How many times should we repeat the initial rate? */
	if (is_legacy(tbl_type.lq_type)) {
		ant_toggle_cnt = 1;
		repeat_rate = IWL_NUMBER_TRY;
	} else {
		repeat_rate = IWL_HT_NUMBER_TRY;
	}

	lq_cmd->general_params.mimo_delimiter =
			is_mimo(tbl_type.lq_type) ? 1 : 0;

	/* Fill 1st table entry (index 0) */
	lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

	if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
		lq_cmd->general_params.single_stream_ant_msk =
						tbl_type.ant_type;
	} else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
		lq_cmd->general_params.dual_stream_ant_msk =
						tbl_type.ant_type;
	} /* otherwise we don't modify the existing value */

	index++;
	repeat_rate--;
	if (priv)
		valid_tx_ant = priv->hw_params.valid_tx_ant;

	/* Fill rest of rate table */
	while (index < LINK_QUAL_MAX_RETRY_NUM) {
		/* Repeat initial/next rate.
		 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
		 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
		while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
			if (is_legacy(tbl_type.lq_type)) {
				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
					ant_toggle_cnt++;
				else if (priv &&
					 iwl4965_rs_toggle_antenna(valid_tx_ant,
							&new_rate, &tbl_type))
					ant_toggle_cnt = 1;
			}

			/* Override next rate if needed for debug purposes */
			iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

			/* Fill next table entry */
			lq_cmd->rs_table[index].rate_n_flags =
					cpu_to_le32(new_rate);
			repeat_rate--;
			index++;
		}

		iwl4965_rs_get_tbl_info_from_mcs(new_rate,
						lq_sta->band, &tbl_type,
						&rate_idx);

		/* Indicate to uCode which entries might be MIMO.
		 * If initial rate was MIMO, this will finally end up
		 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
		if (is_mimo(tbl_type.lq_type))
			lq_cmd->general_params.mimo_delimiter = index;

		/* Get next rate */
		new_rate = iwl4965_rs_get_lower_rate(lq_sta,
					&tbl_type, rate_idx,
						 use_ht_possible);

		/* How many times should we repeat the next rate? */
		if (is_legacy(tbl_type.lq_type)) {
			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
				ant_toggle_cnt++;
			else if (priv &&
				 iwl4965_rs_toggle_antenna(valid_tx_ant,
						   &new_rate, &tbl_type))
				ant_toggle_cnt = 1;

			repeat_rate = IWL_NUMBER_TRY;
		} else {
			repeat_rate = IWL_HT_NUMBER_TRY;
		}

		/* Don't allow HT rates after next pass.
		 * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
		use_ht_possible = 0;

		/* Override next rate if needed for debug purposes */
		iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

		/* Fill next table entry */
		lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

		index++;
		repeat_rate--;
	}

	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;

	lq_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
}
2547
2548static void
2549*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2550{
2551 return hw->priv;
2552}
/* rate scale requires free function to be implemented */
static void iwl4965_rs_free(void *priv_rate)
{
	/* Nothing was allocated in iwl4965_rs_alloc(), so nothing to free. */
}
2558
2559static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
2560 void *priv_sta)
2561{
2562 struct iwl_priv *priv __maybe_unused = priv_r;
2563
2564 IWL_DEBUG_RATE(priv, "enter\n");
2565 IWL_DEBUG_RATE(priv, "leave\n");
2566}
2567
2568
2569#ifdef CONFIG_MAC80211_DEBUGFS
/* Generic debugfs open: stash the inode's private pointer (the lq_sta)
 * for the read/write handlers. */
static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
2575static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2576 u32 *rate_n_flags, int index)
2577{
2578 struct iwl_priv *priv;
2579 u8 valid_tx_ant;
2580 u8 ant_sel_tx;
2581
2582 priv = lq_sta->drv;
2583 valid_tx_ant = priv->hw_params.valid_tx_ant;
2584 if (lq_sta->dbg_fixed_rate) {
2585 ant_sel_tx =
2586 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
2587 >> RATE_MCS_ANT_POS);
2588 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2589 *rate_n_flags = lq_sta->dbg_fixed_rate;
2590 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
2591 } else {
2592 lq_sta->dbg_fixed_rate = 0;
2593 IWL_ERR(priv,
2594 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
2595 ant_sel_tx, valid_tx_ant);
2596 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2597 }
2598 } else {
2599 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2600 }
2601}
2602
2603static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
2604 const char __user *user_buf, size_t count, loff_t *ppos)
2605{
2606 struct iwl_lq_sta *lq_sta = file->private_data;
2607 struct iwl_priv *priv;
2608 char buf[64];
2609 size_t buf_size;
2610 u32 parsed_rate;
2611 struct iwl_station_priv *sta_priv =
2612 container_of(lq_sta, struct iwl_station_priv, lq_sta);
2613 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
2614
2615 priv = lq_sta->drv;
2616 memset(buf, 0, sizeof(buf));
2617 buf_size = min(count, sizeof(buf) - 1);
2618 if (copy_from_user(buf, user_buf, buf_size))
2619 return -EFAULT;
2620
2621 if (sscanf(buf, "%x", &parsed_rate) == 1)
2622 lq_sta->dbg_fixed_rate = parsed_rate;
2623 else
2624 lq_sta->dbg_fixed_rate = 0;
2625
2626 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2627 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2628 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2629
2630 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
2631 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
2632
2633 if (lq_sta->dbg_fixed_rate) {
2634 iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2635 iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
2636 false);
2637 }
2638
2639 return count;
2640}
2641
2642static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
2643 char __user *user_buf, size_t count, loff_t *ppos)
2644{
2645 char *buff;
2646 int desc = 0;
2647 int i = 0;
2648 int index = 0;
2649 ssize_t ret;
2650
2651 struct iwl_lq_sta *lq_sta = file->private_data;
2652 struct iwl_priv *priv;
2653 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2654
2655 priv = lq_sta->drv;
2656 buff = kmalloc(1024, GFP_KERNEL);
2657 if (!buff)
2658 return -ENOMEM;
2659
2660 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2661 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
2662 lq_sta->total_failed, lq_sta->total_success,
2663 lq_sta->active_legacy_rate);
2664 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
2665 lq_sta->dbg_fixed_rate);
2666 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
2667 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
2668 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
2669 (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
2670 desc += sprintf(buff+desc, "lq type %s\n",
2671 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
2672 if (is_Ht(tbl->lq_type)) {
2673 desc += sprintf(buff+desc, " %s",
2674 (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
2675 desc += sprintf(buff+desc, " %s",
2676 (tbl->is_ht40) ? "40MHz" : "20MHz");
2677 desc += sprintf(buff+desc, " %s %s %s\n",
2678 (tbl->is_SGI) ? "SGI" : "",
2679 (lq_sta->is_green) ? "GF enabled" : "",
2680 (lq_sta->is_agg) ? "AGG on" : "");
2681 }
2682 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
2683 lq_sta->last_rate_n_flags);
2684 desc += sprintf(buff+desc, "general:"
2685 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
2686 lq_sta->lq.general_params.flags,
2687 lq_sta->lq.general_params.mimo_delimiter,
2688 lq_sta->lq.general_params.single_stream_ant_msk,
2689 lq_sta->lq.general_params.dual_stream_ant_msk);
2690
2691 desc += sprintf(buff+desc, "agg:"
2692 "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
2693 le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
2694 lq_sta->lq.agg_params.agg_dis_start_th,
2695 lq_sta->lq.agg_params.agg_frame_cnt_limit);
2696
2697 desc += sprintf(buff+desc,
2698 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
2699 lq_sta->lq.general_params.start_rate_index[0],
2700 lq_sta->lq.general_params.start_rate_index[1],
2701 lq_sta->lq.general_params.start_rate_index[2],
2702 lq_sta->lq.general_params.start_rate_index[3]);
2703
2704 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2705 index = iwl4965_hwrate_to_plcp_idx(
2706 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
2707 if (is_legacy(tbl->lq_type)) {
2708 desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
2709 i,
2710 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
2711 iwl_rate_mcs[index].mbps);
2712 } else {
2713 desc += sprintf(buff+desc,
2714 " rate[%d] 0x%X %smbps (%s)\n",
2715 i,
2716 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
2717 iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
2718 }
2719 }
2720
2721 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2722 kfree(buff);
2723 return ret;
2724}
2725
/* file_operations for the read/write "rate_scale_table" debugfs entry */
static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
	.write = iwl4965_rs_sta_dbgfs_scale_table_write,
	.read = iwl4965_rs_sta_dbgfs_scale_table_read,
	.open = iwl4965_open_file_generic,
	.llseek = default_llseek,
};
/*
 * debugfs read handler for "rate_stats_table": for each of the LQ_SIZE
 * rate-scale tables, dump the table's mode flags and then the per-rate
 * success-history windows.  The currently active table is prefixed
 * with '*', the other with 'x'.
 */
static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int i, j;
	ssize_t ret;

	struct iwl_lq_sta *lq_sta = file->private_data;

	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	for (i = 0; i < LQ_SIZE; i++) {
		desc += sprintf(buff+desc,
				"%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
				"rate=0x%X\n",
				lq_sta->active_tbl == i ? "*" : "x",
				lq_sta->lq_info[i].lq_type,
				lq_sta->lq_info[i].is_SGI,
				lq_sta->lq_info[i].is_ht40,
				lq_sta->lq_info[i].is_dup,
				lq_sta->is_green,
				lq_sta->lq_info[i].current_rate);
		/* per-rate TX history window: attempts, successes, ratio */
		for (j = 0; j < IWL_RATE_COUNT; j++) {
			desc += sprintf(buff+desc,
				"counter=%d success=%d %%=%d\n",
				lq_sta->lq_info[i].win[j].counter,
				lq_sta->lq_info[i].win[j].success_counter,
				lq_sta->lq_info[i].win[j].success_ratio);
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2769
/* file_operations for the read-only "rate_stats_table" debugfs entry */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = iwl4965_rs_sta_dbgfs_stats_table_read,
	.open = iwl4965_open_file_generic,
	.llseek = default_llseek,
};
2775
2776static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2777 char __user *user_buf, size_t count, loff_t *ppos)
2778{
2779 char buff[120];
2780 int desc = 0;
2781 ssize_t ret;
2782
2783 struct iwl_lq_sta *lq_sta = file->private_data;
2784 struct iwl_priv *priv;
2785 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
2786
2787 priv = lq_sta->drv;
2788
2789 if (is_Ht(tbl->lq_type))
2790 desc += sprintf(buff+desc,
2791 "Bit Rate= %d Mb/s\n",
2792 tbl->expected_tpt[lq_sta->last_txrate_idx]);
2793 else
2794 desc += sprintf(buff+desc,
2795 "Bit Rate= %d Mb/s\n",
2796 iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
2797
2798 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2799 return ret;
2800}
2801
/* file_operations for the read-only "rate_scale_data" debugfs entry */
static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
	.read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
	.open = iwl4965_open_file_generic,
	.llseek = default_llseek,
};
2807
/*
 * rate_control_ops .add_sta_debugfs hook: create the per-station
 * debugfs entries under the directory mac80211 provides.  Return
 * values of the debugfs_create_* calls are not checked; the dentries
 * are only stored so iwl4965_rs_remove_debugfs() can remove them.
 */
static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
					struct dentry *dir)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	lq_sta->rs_sta_dbgfs_scale_table_file =
		debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
				lq_sta, &rs_sta_dbgfs_scale_table_ops);
	lq_sta->rs_sta_dbgfs_stats_table_file =
		debugfs_create_file("rate_stats_table", S_IRUSR, dir,
			lq_sta, &rs_sta_dbgfs_stats_table_ops);
	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
		debugfs_create_file("rate_scale_data", S_IRUSR, dir,
			lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
		debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
		&lq_sta->tx_agg_tid_en);

}
2826
/*
 * rate_control_ops .remove_sta_debugfs hook: tear down the four
 * per-station entries created by iwl4965_rs_add_debugfs().
 */
static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
2835#endif
2836
2837/*
2838 * Initialization of rate scaling information is done by driver after
2839 * the station is added. Since mac80211 calls this function before a
2840 * station is added we ignore it.
2841 */
static void
iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
			struct ieee80211_sta *sta, void *priv_sta)
{
	/* intentionally empty - see the comment above */
}
/* mac80211 rate-control algorithm registration table for 4965 */
static struct rate_control_ops rs_4965_ops = {
	.module = NULL,
	.name = IWL4965_RS_NAME,
	.tx_status = iwl4965_rs_tx_status,
	.get_rate = iwl4965_rs_get_rate,
	.rate_init = iwl4965_rs_rate_init_stub,
	.alloc = iwl4965_rs_alloc,
	.free = iwl4965_rs_free,
	.alloc_sta = iwl4965_rs_alloc_sta,
	.free_sta = iwl4965_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = iwl4965_rs_add_debugfs,
	.remove_sta_debugfs = iwl4965_rs_remove_debugfs,
#endif
};
2862
/* Register the 4965 rate-scaling algorithm with mac80211; returns 0 or
 * a negative errno from ieee80211_rate_control_register(). */
int iwl4965_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_4965_ops);
}
2867
/* Unregister the 4965 rate-scaling algorithm from mac80211 */
void iwl4965_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_4965_ops);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
deleted file mode 100644
index 2b144bbfc3c5..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
+++ /dev/null
@@ -1,215 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-4965-calib.h"
38#include "iwl-sta.h"
39#include "iwl-io.h"
40#include "iwl-helpers.h"
41#include "iwl-4965-hw.h"
42#include "iwl-4965.h"
43
/*
 * Handle the missed-beacon notification from the uCode: if the number
 * of consecutively missed beacons exceeds the driver's threshold,
 * re-run sensitivity calibration (skipped while a scan is active).
 */
void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)

{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    priv->missed_beacon_threshold) {
		IWL_DEBUG_CALIB(priv,
		    "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
		    le32_to_cpu(missed_beacon->consecutive_missed_beacons),
		    le32_to_cpu(missed_beacon->total_missed_becons),
		    le32_to_cpu(missed_beacon->num_recvd_beacons),
		    le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(STATUS_SCANNING, &priv->status))
			iwl4965_init_sensitivity(priv);
	}
}
64
65/* Calculate noise level, based on measurements during network silence just
66 * before arriving beacon. This measurement can be done only if we know
67 * exactly when to expect beacons, therefore only when we're associated. */
68static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
69{
70 struct statistics_rx_non_phy *rx_info;
71 int num_active_rx = 0;
72 int total_silence = 0;
73 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
74 int last_rx_noise;
75
76 rx_info = &(priv->_4965.statistics.rx.general);
77 bcn_silence_a =
78 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
79 bcn_silence_b =
80 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
81 bcn_silence_c =
82 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
83
84 if (bcn_silence_a) {
85 total_silence += bcn_silence_a;
86 num_active_rx++;
87 }
88 if (bcn_silence_b) {
89 total_silence += bcn_silence_b;
90 num_active_rx++;
91 }
92 if (bcn_silence_c) {
93 total_silence += bcn_silence_c;
94 num_active_rx++;
95 }
96
97 /* Average among active antennas */
98 if (num_active_rx)
99 last_rx_noise = (total_silence / num_active_rx) - 107;
100 else
101 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
102
103 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
104 bcn_silence_a, bcn_silence_b, bcn_silence_c,
105 last_rx_noise);
106}
107
108#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
109/*
110 * based on the assumption of all statistics counter are in DWORD
111 * FIXME: This function is for debugging, do not deal with
112 * the case of counters roll-over.
113 */
/*
 * Fold a freshly received statistics block into the running debugfs
 * accumulators.  Both the stored previous block and the new one are
 * walked as flat arrays of __le32 counters; per-counter deltas are
 * added to accum and the largest delta per counter is tracked.
 * Counters that did not grow are skipped (see file-level FIXME about
 * roll-over not being handled).
 */
static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
					__le32 *stats)
{
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct statistics_general_common *general, *accum_general;
	struct statistics_tx *tx, *accum_tx;

	prev_stats = (__le32 *)&priv->_4965.statistics;
	accum_stats = (u32 *)&priv->_4965.accum_statistics;
	size = sizeof(struct iwl_notif_statistics);
	general = &priv->_4965.statistics.general.common;
	accum_general = &priv->_4965.accum_statistics.general.common;
	tx = &priv->_4965.statistics.tx;
	accum_tx = &priv->_4965.accum_statistics.tx;
	delta = (u32 *)&priv->_4965.delta_statistics;
	max_delta = (u32 *)&priv->_4965.max_delta;

	/* start past the first dword (presumably the flag word, which is
	 * not a counter) - TODO confirm against iwl_notif_statistics */
	for (i = sizeof(__le32); i < size;
	     i += sizeof(__le32), stats++, prev_stats++, delta++,
	     max_delta++, accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta = (le32_to_cpu(*stats) -
				le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative statistics for "no-counter" type statistics */
	accum_general->temperature = general->temperature;
	accum_general->ttl_timestamp = general->ttl_timestamp;
}
150#endif
151
152#define REG_RECALIB_PERIOD (60)
153
/*
 * Handle a statistics notification from the uCode: accumulate the
 * debugfs counters (before the raw copy overwrites the previous
 * snapshot), store the new snapshot, re-arm the periodic statistics
 * timer and, when not scanning, kick off noise calculation and the
 * run-time calibration work.  A temperature or HT40-mode change also
 * triggers the temperature callback.
 */
void iwl4965_rx_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_DEBUG_RX(priv,
		"Statistics notification received (%d vs %d).\n",
		(int)sizeof(struct iwl_notif_statistics),
		le32_to_cpu(pkt->len_n_flags) &
		FH_RSCSR_FRAME_SIZE_MSK);

	/* detect temperature or HT40-mode changes vs the stored snapshot */
	change = ((priv->_4965.statistics.general.common.temperature !=
		   pkt->u.stats.general.common.temperature) ||
		   ((priv->_4965.statistics.flag &
		   STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
		   (pkt->u.stats.flag &
		   STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	/* must run before the memcpy below replaces the previous snapshot */
	iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif

	/* TODO: reading some of statistics is unneeded */
	memcpy(&priv->_4965.statistics, &pkt->u.stats,
		sizeof(priv->_4965.statistics));

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * REG_RECALIB_PERIOD seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl4965_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
}
197
/*
 * Handle the reply to a statistics request.  If the uCode flagged a
 * statistics clear, zero the debugfs accumulators first, then process
 * the payload exactly like an unsolicited statistics notification.
 */
void iwl4965_reply_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
		memset(&priv->_4965.accum_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->_4965.delta_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->_4965.max_delta, 0,
			sizeof(struct iwl_notif_statistics));
#endif
		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
	}
	iwl4965_rx_statistics(priv, rxb);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
deleted file mode 100644
index a262c23553d2..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+++ /dev/null
@@ -1,721 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-4965.h"
36
/*
 * Allocate and fill a default link-quality command for a new station:
 * the whole retry table is pinned to the lowest rate for the current
 * band (6M on 5GHz, 1M on 2.4GHz), and single/dual-stream antenna
 * masks are derived from the valid TX antenna set.  Returns NULL on
 * allocation failure; the caller owns (and eventually frees) the
 * returned command.
 */
static struct iwl_link_quality_cmd *
iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
{
	int i, r;
	struct iwl_link_quality_cmd *link_cmd;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
	if (!link_cmd) {
		IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
		return NULL;
	}
	/* Set up the rate scaling to start at selected rate, fall back
	 * all the way down to 1M in IEEE order, and then spin on 1M */
	if (priv->band == IEEE80211_BAND_5GHZ)
		r = IWL_RATE_6M_INDEX;
	else
		r = IWL_RATE_1M_INDEX;

	if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
				RATE_MCS_ANT_POS;
	rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
						   rate_flags);
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
				iwl4965_first_antenna(priv->hw_params.valid_tx_ant);

	/* dual-stream mask: everything except the first antenna; fall back
	 * to A+B when only one antenna is valid, or use the full valid set
	 * when exactly two antennas exist */
	link_cmd->general_params.dual_stream_ant_msk =
		priv->hw_params.valid_tx_ant &
		~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
			priv->hw_params.valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;

	return link_cmd;
}
88
89/*
90 * iwl4965_add_bssid_station - Add the special IBSS BSSID station
91 *
92 * Function sleeps.
93 */
int
iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			     const u8 *addr, u8 *sta_id_r)
{
	int ret;
	u8 sta_id;
	struct iwl_link_quality_cmd *link_cmd;
	unsigned long flags;

	/* report "invalid" until the station is actually added */
	if (sta_id_r)
		*sta_id_r = IWL_INVALID_STATION;

	ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM\n", addr);
		return ret;
	}

	if (sta_id_r)
		*sta_id_r = sta_id;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].used |= IWL_STA_LOCAL;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* Set up default rate scaling table in device's station table */
	link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
	if (!link_cmd) {
		IWL_ERR(priv,
			"Unable to initialize rate scaling for station %pM.\n",
			addr);
		/* note: the station itself stays added at this point */
		return -ENOMEM;
	}

	ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
	if (ret)
		IWL_ERR(priv, "Link quality command failed (%d)\n", ret);

	/* ownership of link_cmd passes to the station entry */
	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return 0;
}
138
/*
 * Build and synchronously send the REPLY_WEPKEY command carrying all
 * WEP_KEYS_MAX default (static) WEP keys for the given context.  Keys
 * with zero key_size are sent with an invalid offset.  When every key
 * slot is empty the command is only sent if @send_if_empty is set.
 * Sleeps (CMD_SYNC); returns 0 or a negative errno.
 */
static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
				      struct iwl_rxon_context *ctx,
				      bool send_if_empty)
{
	int i, not_empty = 0;
	u8 buff[sizeof(struct iwl_wep_cmd) +
		sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
	struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct iwl_wep_cmd);
	struct iwl_host_cmd cmd = {
		.id = ctx->wep_key_cmd,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};

	might_sleep();

	memset(wep_cmd, 0, cmd_size +
			(sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX ; i++) {
		wep_cmd->key[i].key_index = i;
		if (ctx->wep_keys[i].key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = 1;
		} else {
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
		}

		wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
		/* key material copied at offset 3 of the key array -
		 * presumably the uCode key layout; TODO confirm */
		memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
				ctx->wep_keys[i].key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;

	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return iwl_legacy_send_cmd(priv, &cmd);
	else
		return 0;
}
185
/*
 * Re-send the stored default WEP keys to the device (e.g. after a
 * firmware restart).  Does nothing if all key slots are empty.
 * Caller must hold priv->mutex.
 */
int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
				 struct iwl_rxon_context *ctx)
{
	lockdep_assert_held(&priv->mutex);

	return iwl4965_static_wepkey_cmd(priv, ctx, false);
}
193
194int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
195 struct iwl_rxon_context *ctx,
196 struct ieee80211_key_conf *keyconf)
197{
198 int ret;
199
200 lockdep_assert_held(&priv->mutex);
201
202 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
203 keyconf->keyidx);
204
205 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
206 if (iwl_legacy_is_rfkill(priv)) {
207 IWL_DEBUG_WEP(priv,
208 "Not sending REPLY_WEPKEY command due to RFKILL.\n");
209 /* but keys in device are clear anyway so return success */
210 return 0;
211 }
212 ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
213 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
214 keyconf->keyidx, ret);
215
216 return ret;
217}
218
/*
 * Install a default (static) WEP key: validate the length (40- or
 * 104-bit), stash the key material in the context and send the full
 * static key set to the device.  Caller must hold priv->mutex.
 * Returns 0 or a negative errno.
 */
int iwl4965_set_default_wep_key(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    struct ieee80211_key_conf *keyconf)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (keyconf->keylen != WEP_KEY_LEN_128 &&
	    keyconf->keylen != WEP_KEY_LEN_64) {
		IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
		return -EINVAL;
	}

	/* device handles the WEP IV, so mac80211 need not generate it */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = HW_KEY_DEFAULT;
	priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;

	ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
	memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
							keyconf->keylen);

	ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
	IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
		keyconf->keylen, keyconf->keyidx, ret);

	return ret;
}
247
/*
 * Install a pairwise (dynamic) WEP key for one station: record the key
 * in the driver's station table, allocate a uCode key offset if this
 * slot had no key yet, and synchronously send the updated ADD_STA
 * command.  Caller must hold priv->mutex.
 */
static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
					struct iwl_rxon_context *ctx,
					struct ieee80211_key_conf *keyconf,
					u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	/* device handles the WEP IV */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(priv->stations[sta_id].keyinfo.key,
				keyconf->key, keyconf->keylen);

	/* WEP key copied at offset 3 of the key array - presumably the
	 * uCode key layout, same as the static WEP path; TODO confirm */
	memcpy(&priv->stations[sta_id].sta.key.key[3],
				keyconf->key, keyconf->keylen);

	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
				 iwl_legacy_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* snapshot under the lock, send after releasing it */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
			sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
303
/*
 * Install a pairwise CCMP (AES) key for one station: record the key in
 * the driver's station table, allocate a uCode key offset if this slot
 * had no key yet, and synchronously send the updated ADD_STA command.
 * Unlike WEP, mac80211 is asked to generate the IV.  Caller must hold
 * priv->mutex.
 */
static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
					 struct iwl_rxon_context *ctx,
					 struct ieee80211_key_conf *keyconf,
					 u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
	       keyconf->keylen);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
	       keyconf->keylen);

	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
				 iwl_legacy_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* snapshot under the lock, send after releasing it */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
			 sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
354
355static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
356 struct iwl_rxon_context *ctx,
357 struct ieee80211_key_conf *keyconf,
358 u8 sta_id)
359{
360 unsigned long flags;
361 int ret = 0;
362 __le16 key_flags = 0;
363
364 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
365 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
366 key_flags &= ~STA_KEY_FLG_INVALID;
367
368 if (sta_id == ctx->bcast_sta_id)
369 key_flags |= STA_KEY_MULTICAST_MSK;
370
371 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
372 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
373
374 spin_lock_irqsave(&priv->sta_lock, flags);
375
376 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
377 priv->stations[sta_id].keyinfo.keylen = 16;
378
379 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
380 == STA_KEY_FLG_NO_ENC)
381 priv->stations[sta_id].sta.key.key_offset =
382 iwl_legacy_get_free_ucode_key_index(priv);
383 /* else, we are overriding an existing key => no need to allocated room
384 * in uCode. */
385
386 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
387 "no space for a new key");
388
389 priv->stations[sta_id].sta.key.key_flags = key_flags;
390
391
392 /* This copy is acutally not needed: we get the key with each TX */
393 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
394
395 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
396
397 spin_unlock_irqrestore(&priv->sta_lock, flags);
398
399 return ret;
400}
401
/*
 * Push a new TKIP phase-1 RX key to the device when the IV32 rolls
 * over.  If an in-progress scan cannot be cancelled, the update is
 * skipped and decryption briefly falls back to software.  The ADD_STA
 * command is sent asynchronously under the station lock.
 */
void iwl4965_update_tkip_key(struct iwl_priv *priv,
			 struct iwl_rxon_context *ctx,
			 struct ieee80211_key_conf *keyconf,
			 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (iwl_legacy_scan_cancel(priv)) {
		/* cancel scan failed, just live w/ bad key and rely
		   briefly on SW decryption */
		return;
	}

	sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
	if (sta_id == IWL_INVALID_STATION)
		return;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* five 16-bit words of TTAK phase-1 key material */
	for (i = 0; i < 5; i++)
		priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
			cpu_to_le16(phase1key[i]);

	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

}
437
/*
 * Remove a pairwise (dynamic) key from a station: free the uCode key
 * offset, clear the stored key material, mark the key slot invalid and
 * synchronously send the updated ADD_STA command.  Returns 0 (also for
 * the benign cases: key already replaced at a different index, offset
 * already invalid, or RF-kill active).  Caller must hold priv->mutex.
 */
int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx,
			   struct ieee80211_key_conf *keyconf,
			   u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	ctx->key_mapping_keys--;

	spin_lock_irqsave(&priv->sta_lock, flags);
	key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with index different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different index.
		 * Don't do anything and return ok
		 */
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
		IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
			    keyconf->keyidx, key_flags);
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	/* release the uCode key-table slot this key occupied */
	if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
		&priv->ucode_key_table))
		IWL_ERR(priv, "index %d not used in uCode key table.\n",
			priv->stations[sta_id].sta.key.key_offset);
	memset(&priv->stations[sta_id].keyinfo, 0,
					sizeof(struct iwl_hw_key));
	memset(&priv->stations[sta_id].sta.key, 0,
					sizeof(struct iwl4965_keyinfo));
	priv->stations[sta_id].sta.key.key_flags =
			STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (iwl_legacy_is_rfkill(priv)) {
		IWL_DEBUG_WEP(priv,
		 "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}
	/* snapshot under the lock, send after releasing it */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
			sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
502
/*
 * Install a pairwise (dynamic) key for a station, dispatching to the
 * cipher-specific helper (CCMP, TKIP or WEP40/104).  Marks the key as
 * hardware-handled via hw_key_idx and bumps the context's mapping-key
 * count.  Returns 0, a negative errno from the helper, or -EINVAL for
 * an unknown cipher.  Caller must hold priv->mutex.
 */
int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
		       struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	ctx->key_mapping_keys++;
	keyconf->hw_key_idx = HW_KEY_DYNAMIC;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
							keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
							keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
							keyconf, sta_id);
		break;
	default:
		IWL_ERR(priv,
			"Unknown alg: %s cipher = %x\n", __func__,
			keyconf->cipher);
		ret = -EINVAL;
	}

	IWL_DEBUG_WEP(priv,
		"Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta_id, ret);

	return ret;
}
541
/**
 * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
 *
 * This adds the broadcast station into the driver's station table
 * and marks it driver active, so that it will be restored to the
 * device at the next best time.
 *
 * Returns 0 on success, -EINVAL if the station slot could not be
 * prepared, -ENOMEM if the link quality command allocation fails.
 */
int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct iwl_link_quality_cmd *link_cmd;
	unsigned long flags;
	u8 sta_id;

	spin_lock_irqsave(&priv->sta_lock, flags);
	sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
								false, NULL);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Unable to prepare broadcast station\n");
		spin_unlock_irqrestore(&priv->sta_lock, flags);

		return -EINVAL;
	}

	/* Mark the entry so it is restored to the uCode on the next sync. */
	priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
	priv->stations[sta_id].used |= IWL_STA_BCAST;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* LQ allocation may sleep, so it happens outside sta_lock. */
	link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
	if (!link_cmd) {
		IWL_ERR(priv,
			"Unable to initialize rate scaling for bcast station.\n");
		/* NOTE(review): the station stays marked DRIVER_ACTIVE on
		 * this error path — presumably cleaned up by the caller or
		 * at restart; confirm against callers. */
		return -ENOMEM;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return 0;
}
583
584/**
585 * iwl4965_update_bcast_station - update broadcast station's LQ command
586 *
587 * Only used by iwl4965. Placed here to have all bcast station management
588 * code together.
589 */
590static int iwl4965_update_bcast_station(struct iwl_priv *priv,
591 struct iwl_rxon_context *ctx)
592{
593 unsigned long flags;
594 struct iwl_link_quality_cmd *link_cmd;
595 u8 sta_id = ctx->bcast_sta_id;
596
597 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
598 if (!link_cmd) {
599 IWL_ERR(priv,
600 "Unable to initialize rate scaling for bcast station.\n");
601 return -ENOMEM;
602 }
603
604 spin_lock_irqsave(&priv->sta_lock, flags);
605 if (priv->stations[sta_id].lq)
606 kfree(priv->stations[sta_id].lq);
607 else
608 IWL_DEBUG_INFO(priv,
609 "Bcast station rate scaling has not been initialized yet.\n");
610 priv->stations[sta_id].lq = link_cmd;
611 spin_unlock_irqrestore(&priv->sta_lock, flags);
612
613 return 0;
614}
615
616int iwl4965_update_bcast_stations(struct iwl_priv *priv)
617{
618 struct iwl_rxon_context *ctx;
619 int ret = 0;
620
621 for_each_context(priv, ctx) {
622 ret = iwl4965_update_bcast_station(priv, ctx);
623 if (ret)
624 break;
625 }
626
627 return ret;
628}
629
/**
 * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
 *
 * Clears the per-TID "disable Tx" bit in the device's station entry and
 * pushes the change to the uCode synchronously. Must be called with
 * priv->mutex held.
 */
int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
{
	unsigned long flags;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	/* Remove "disable" flag, to enable Tx for this TID */
	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
	priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot the command under the lock; send after dropping it,
	 * since iwl_legacy_send_add_sta may sleep in CMD_SYNC mode. */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
					sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
651
/*
 * iwl4965_sta_rx_agg_start - tell the uCode to start an Rx BA session
 *
 * Programs the immediate block-ack TID and starting sequence number into
 * the station entry and sends the updated ADD_STA command synchronously.
 * Must be called with priv->mutex held. Returns -ENXIO if the mac80211
 * station has no entry in the driver's station table.
 */
int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
			int tid, u16 ssn)
{
	unsigned long flags;
	int sta_id;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	sta_id = iwl_legacy_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.station_flags_msk = 0;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
	priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Copy under lock, send after unlocking (send may sleep). */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
					sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
677
/*
 * iwl4965_sta_rx_agg_stop - tell the uCode to tear down an Rx BA session
 *
 * Mirror of iwl4965_sta_rx_agg_start: programs the TID to remove from
 * immediate block-ack and sends the ADD_STA command synchronously.
 * Must be called with priv->mutex held.
 */
int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
			int tid)
{
	unsigned long flags;
	int sta_id;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	sta_id = iwl_legacy_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.station_flags_msk = 0;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
	priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Copy under lock, send after unlocking (send may sleep). */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
					sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
704
/*
 * iwl4965_sta_modify_sleep_tx_count - allow @cnt frames to a power-saving STA
 *
 * Marks the station as in power-save and sets how many frames the uCode
 * may release to it. The ADD_STA command is sent asynchronously while
 * still holding sta_lock — CMD_ASYNC does not sleep, so this is safe.
 */
void
iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
	priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	priv->stations[sta_id].sta.sta.modify_mask =
					STA_MODIFY_SLEEP_TX_COUNT_MSK;
	priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	iwl_legacy_send_add_sta(priv,
				&priv->stations[sta_id].sta, CMD_ASYNC);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
deleted file mode 100644
index 7f12e3638bae..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
+++ /dev/null
@@ -1,1378 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40#include "iwl-4965-hw.h"
41#include "iwl-4965.h"
42
43/*
44 * mac80211 queues, ACs, hardware queues, FIFOs.
45 *
46 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
47 *
 * Mac80211 uses the following numbers, which we get from it
49 * by way of skb_get_queue_mapping(skb):
50 *
51 * VO 0
52 * VI 1
53 * BE 2
54 * BK 3
55 *
56 *
57 * Regular (not A-MPDU) frames are put into hardware queues corresponding
58 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
59 * own queue per aggregation session (RA/TID combination), such queues are
60 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
61 * order to map frames to the right queue, we also need an AC->hw queue
62 * mapping. This is implemented here.
63 *
64 * Due to the way hw queues are set up (by the hw specific modules like
65 * iwl-4965.c), the AC->hw queue mapping is the identity
66 * mapping.
67 */
68
/* TID (0-7) -> access category lookup table; TIDs 8-15 are not supported
 * (see iwl4965_get_ac_from_tid below). */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO
};
79
80static inline int iwl4965_get_ac_from_tid(u16 tid)
81{
82 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
83 return tid_to_ac[tid];
84
85 /* no support for TIDs 8-15 yet */
86 return -EINVAL;
87}
88
89static inline int
90iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
91{
92 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
93 return ctx->ac_to_fifo[tid_to_ac[tid]];
94
95 /* no support for TIDs 8-15 yet */
96 return -EINVAL;
97}
98
/*
 * handle build REPLY_TX command notification.
 *
 * Fills in the generic (rate-independent) fields of the Tx command:
 * ACK/sequence-control/TSF flags derived from the frame type, the
 * station id, TID, protection flags and PM frame timeout.
 */
static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
					struct sk_buff *skb,
					struct iwl_tx_cmd *tx_cmd,
					struct ieee80211_tx_info *info,
					struct ieee80211_hdr *hdr,
					u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* Only the first fragment of a probe response carries the
		 * TSF flag (seq_ctrl fragment number == 0). */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		/* QoS data: driver keeps the sequence counter per TID, so
		 * the uCode must not overwrite seq_ctrl. */
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	/* Add RTS/CTS protection flags as required by mac80211. */
	iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* (re)assoc requests get a slightly longer PM timeout. */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
156
#define RTS_DFAULT_RETRY_LIMIT 60

/*
 * Fill in the rate-related fields of the Tx command: retry limits and,
 * for non-data frames, an explicit legacy rate with CCK/antenna flags.
 * Data frames delegate rate selection to the uCode station table.
 */
static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
			      struct iwl_tx_cmd *tx_cmd,
			      struct ieee80211_tx_info *info,
			      __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses*/
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets; never more than the data limit. */
	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 *
	 * NOTE(review): the upper bound uses '>' not '>=' — verify whether
	 * rate_idx == IWL_RATE_COUNT_LEGACY is meant to be valid here.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
				info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwlegacy_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas: round-robin over the valid Tx antennas for
	 * management frames. */
	priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				priv->hw_params.valid_tx_ant);

	rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
223
/*
 * Fill in the security fields (sec_ctl + key material) of the Tx command
 * according to the hw key mac80211 selected for this frame.
 */
static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* Derive the per-packet phase-2 TKIP key for this frame. */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		/* WEP key goes at offset 3 in the key field. */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
265
/*
 * start REPLY_TX command process
 *
 * Main Tx entry point: builds the REPLY_TX command for @skb, maps the
 * command header + MAC header and the frame payload for DMA, attaches
 * both to a TFD in the chosen Tx queue, and advances the queue write
 * pointer. Returns 0 on success, -1 if the frame was dropped.
 */
int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	bool is_agg = false;

	if (info->control.vif)
		ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_legacy_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id to do not break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);

		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock;
		}
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = ctx->mcast_queue;
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	/* irqs already disabled/saved above when locking priv->lock */
	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
			spin_unlock(&priv->sta_lock);
			goto drop_unlock;
		}
		/* Stamp the driver-maintained per-TID sequence number into
		 * the MAC header, preserving the fragment number. */
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
			is_agg = true;
		}
	}

	txq = &priv->txq[txq_id];
	q = &txq->q;

	if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
		spin_unlock(&priv->sta_lock);
		goto drop_unlock;
	}

	if (ieee80211_is_data_qos(fc)) {
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
		/* Only advance the stored sequence number for the last
		 * fragment of a frame. */
		if (!ieee80211_has_morefrags(fc))
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	}

	spin_unlock(&priv->sta_lock);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);


	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);

	iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_legacy_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, firstlen,
				    PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, firstlen, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   secondlen, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, secondlen,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    firstlen, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						     le16_to_cpu(tx_cmd->len));

	/* hand the updated command buffer back to the device */
	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       firstlen, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_legacy_dev_tx(priv,
			&((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			sizeof(struct iwl_tfd),
			&out_cmd->hdr, firstlen,
			skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_legacy_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	/* Throttle mac80211 if the queue is getting full. */
	if ((iwl_legacy_queue_space(q) < q->high_mark) &&
			priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_legacy_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_legacy_stop_queue(priv, txq);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
562
563static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
564 struct iwl_dma_ptr *ptr, size_t size)
565{
566 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
567 GFP_KERNEL);
568 if (!ptr->addr)
569 return -ENOMEM;
570 ptr->size = size;
571 return 0;
572}
573
574static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
575 struct iwl_dma_ptr *ptr)
576{
577 if (unlikely(!ptr->addr))
578 return;
579
580 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
581 memset(ptr, 0, sizeof(*ptr));
582}
583
584/**
585 * iwl4965_hw_txq_ctx_free - Free TXQ Context
586 *
587 * Destroy all TX DMA queues and structures
588 */
589void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
590{
591 int txq_id;
592
593 /* Tx queues */
594 if (priv->txq) {
595 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
596 if (txq_id == priv->cmd_queue)
597 iwl_legacy_cmd_queue_free(priv);
598 else
599 iwl_legacy_tx_queue_free(priv, txq_id);
600 }
601 iwl4965_free_dma_ptr(priv, &priv->kw);
602
603 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
604
605 /* free tx queue structure */
606 iwl_legacy_txq_mem(priv);
607}
608
/**
 * iwl4965_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * Allocates the scheduler byte-count tables and keep-warm buffer, the
 * Tx queue array, and initializes every queue (including the command
 * queue). On failure, everything allocated so far is released via the
 * goto cleanup ladder.
 *
 * @param priv
 * @return error code
 */
int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl4965_hw_txq_ctx_free(priv);

	ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_legacy_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl4965_txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		/* The command queue gets a different number of slots. */
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_legacy_tx_queue_init(priv,
					&priv->txq[txq_id], slots_num,
				       txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

	/* Cleanup ladder: iwl4965_free_dma_ptr clears its pointer after
	 * freeing, so the overlapping frees below are safe no-ops. */
 error:
	iwl4965_hw_txq_ctx_free(priv);
	iwl4965_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}
676
677void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
678{
679 int txq_id, slots_num;
680 unsigned long flags;
681
682 spin_lock_irqsave(&priv->lock, flags);
683
684 /* Turn off all Tx DMA fifos */
685 iwl4965_txq_set_sched(priv, 0);
686
687 /* Tell NIC where to find the "keep warm" buffer */
688 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
689
690 spin_unlock_irqrestore(&priv->lock, flags);
691
692 /* Alloc and init all Tx queues, including the command queue (#4) */
693 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
694 slots_num = txq_id == priv->cmd_queue ?
695 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
696 iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
697 slots_num, txq_id);
698 }
699}
700
/**
 * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
 *
 * Disables the scheduler, stops each Tx DMA channel (waiting up to 1 ms
 * for it to go idle), then unmaps all queued buffers and frees skbs.
 */
void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	iwl4965_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_legacy_write_direct32(priv,
				FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		/* Poll (up to 1000 us) for the channel-idle status bit. */
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_legacy_read_direct32(priv,
					FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Nothing to unmap if queues were never allocated. */
	if (!priv->txq)
		return;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == priv->cmd_queue)
			iwl_legacy_cmd_queue_unmap(priv);
		else
			iwl_legacy_tx_queue_unmap(priv, txq_id);
}
738
739/*
740 * Find first available (lowest unused) Tx Queue, mark it "active".
741 * Called only when finding queue for aggregation.
742 * Should never return anything < 7, because they should already
743 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
744 */
745static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
746{
747 int txq_id;
748
749 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
750 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
751 return txq_id;
752 return -1;
753}
754
/**
 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
 */
static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
					    u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_legacy_write_prph(priv,
		IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
768
769/**
770 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
771 */
772static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
773 u16 txq_id)
774{
775 u32 tbl_dw_addr;
776 u32 tbl_dw;
777 u16 scd_q2ratid;
778
779 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
780
781 tbl_dw_addr = priv->scd_base_addr +
782 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
783
784 tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
785
786 if (txq_id & 0x1)
787 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
788 else
789 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
790
791 iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
792
793 return 0;
794}
795
/**
 * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
 *
 * Validates the queue number, enables Tx of @tid in the device's
 * station table, then (under priv->lock) programs the scheduler:
 * stops the queue, maps RA/TID to it, marks it chain-building, aligns
 * the read/write pointers to the BA-session start sequence, writes
 * window size and frame limit, and activates the queue on @tx_fifo.
 *
 * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
 * i.e. it must be one of the higher queues used for aggregation
 *
 * Returns 0 on success, -EINVAL for a queue number outside the
 * aggregation range, or the error from
 * iwl4965_sta_tx_modify_enable_tid().
 */
static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
				int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL49_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL49_FIRST_AMPDU_QUEUE,
			IWL49_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_legacy_write_targ_mem(priv,
		priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
		IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
		(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
		& IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	/* Enable scheduler interrupts for this queue */
	iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
864
865
/**
 * iwl4965_tx_agg_start - mac80211 entry point to start a Tx BA session
 *
 * Picks a free aggregation queue, records it in the station's per-TID
 * data, and programs the scheduler via iwl4965_txq_agg_enable(). If
 * frames for this TID are still pending in the HW queue, the session
 * is left in IWL_EMPTYING_HW_QUEUE_ADDBA and completed later by
 * iwl4965_txq_check_empty(); otherwise mac80211 is notified at once.
 *
 * Returns 0 on success or a negative errno.
 */
int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	/* NOTE(review): purely informational trace, yet emitted at WARN
	 * level -- looks like it should be IWL_DEBUG_HT; left unchanged. */
	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, sta->addr, tid);

	sta_id = iwl_legacy_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* Atomically claim the lowest unused queue for this session */
	txq_id = iwl4965_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	/* BA window starts at the station's next sequence number */
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	iwl_legacy_set_swq_id(&priv->txq[txq_id],
				iwl4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
						sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		/* No pending frames: aggregation can start immediately */
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		/* Frames still queued: iwl4965_txq_check_empty() finishes
		 * the ADDBA flow once the HW queue drains */
		IWL_DEBUG_HT(priv,
			"HW queue is NOT empty: %d packets in HW queue\n",
			tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}
930
/**
 * iwl4965_txq_agg_disable - tear down scheduler state for an agg queue
 *
 * Reverse of iwl4965_txq_agg_enable(): stops the queue, removes it
 * from the chain-building set, re-aligns the read/write pointers to
 * @ssn_idx, masks its scheduler interrupt, deactivates the queue and
 * returns it to FIFO @tx_fifo in inactive state.
 *
 * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
 * priv->lock must be held by the caller
 *
 * Returns 0, or -EINVAL if txq_id is outside the aggregation range.
 */
static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
				u16 ssn_idx, u8 tx_fifo)
{
	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL49_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL49_FIRST_AMPDU_QUEUE,
			IWL49_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	iwl_legacy_clear_bits_prph(priv,
			IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_legacy_clear_bits_prph(priv,
			IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}
966
/**
 * iwl4965_tx_agg_stop - mac80211 entry point to stop a Tx BA session
 *
 * If the HW queue still holds frames, only marks the session
 * IWL_EMPTYING_HW_QUEUE_DELBA and returns; iwl4965_txq_check_empty()
 * completes the teardown once the queue drains. Otherwise the
 * scheduler queue is disabled immediately and mac80211 is notified.
 *
 * Returns 0 on success or a negative errno.
 */
int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_legacy_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
	}

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		/* Defer the teardown until the HW queue drains */
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
 turn_off:
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs: swap sta_lock for priv->lock while
	 * keeping interrupts disabled; "flags" saved by the irqsave above
	 * is restored when priv->lock is released below */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
1042
/**
 * iwl4965_txq_check_empty - finish deferred ADDBA/DELBA transitions
 *
 * Called after reclaiming frames from @txq_id. If the aggregation
 * session for @sta_id/@tid was waiting for the HW queue to drain,
 * complete the pending transition: disable the agg queue and notify
 * mac80211 the session stopped (DELBA case), or mark the session
 * running and notify mac80211 it started (ADDBA case).
 *
 * Caller must hold priv->sta_lock. Always returns 0.
 */
int iwl4965_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
	struct iwl_rxon_context *ctx;

	ctx = &priv->contexts[priv->stations[sta_id].ctxid];

	lockdep_assert_held(&priv->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue DELBA flow\n");
			iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	}

	return 0;
}
1083
1084static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
1085 struct iwl_rxon_context *ctx,
1086 const u8 *addr1)
1087{
1088 struct ieee80211_sta *sta;
1089 struct iwl_station_priv *sta_priv;
1090
1091 rcu_read_lock();
1092 sta = ieee80211_find_sta(ctx->vif, addr1);
1093 if (sta) {
1094 sta_priv = (void *)sta->drv_priv;
1095 /* avoid atomic ops if this isn't a client */
1096 if (sta_priv->client &&
1097 atomic_dec_return(&sta_priv->pending_frames) == 0)
1098 ieee80211_sta_block_awake(priv->hw, sta, false);
1099 }
1100 rcu_read_unlock();
1101}
1102
1103static void
1104iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
1105 bool is_agg)
1106{
1107 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1108
1109 if (!is_agg)
1110 iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
1111
1112 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
1113}
1114
/**
 * iwl4965_tx_queue_reclaim - reclaim completed TFDs from a Tx queue
 *
 * Advances q->read_ptr from its current position through @index
 * (inclusive), reporting Tx status to mac80211 and freeing the TFD for
 * every frame passed.
 *
 * Returns the number of reclaimed QoS-data frames (callers use this to
 * adjust the per-TID tfds_in_queue count); non-QoS frames are freed
 * but not counted. Returns 0 if @index is invalid for this queue.
 */
int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	/* walk the circular buffer up to one-past-@index, wrapping at n_bd */
	for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];

		if (WARN_ON_ONCE(tx_info->skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		/* queues >= IWL4965_FIRST_AMPDU_QUEUE carry aggregated
		 * traffic, which skips pending-frames accounting */
		iwl4965_tx_status(priv, tx_info,
				 txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
		tx_info->skb = NULL;

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}
1151
/**
 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 *
 * Returns 0 on success, -EINVAL if no BA was expected, or -1 if the
 * aggregation spans more frames than the 64-bit bitmap can describe.
 */
static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_ht_agg *agg,
				struct iwl_compressed_ba_resp *ba_resp)

{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	int successes = 0;
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
							ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* queue index space wraps at 0x100 entries */
		sh += 0x100;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* Align device's BA bitmap with driver's Tx window start */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	sent_bitmap = bitmap & agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i,
			(agg->start_idx + i) & 0xff,
			agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
				(unsigned long long)bitmap);

	/* Aggregate outcome is reported on the first frame's tx_info */
	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	return 0;
}
1225
1226/**
1227 * translate ucode response to mac80211 tx status control values
1228 */
1229void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
1230 struct ieee80211_tx_info *info)
1231{
1232 struct ieee80211_tx_rate *r = &info->control.rates[0];
1233
1234 info->antenna_sel_tx =
1235 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1236 if (rate_n_flags & RATE_MCS_HT_MSK)
1237 r->flags |= IEEE80211_TX_RC_MCS;
1238 if (rate_n_flags & RATE_MCS_GF_MSK)
1239 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1240 if (rate_n_flags & RATE_MCS_HT40_MSK)
1241 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1242 if (rate_n_flags & RATE_MCS_DUP_MSK)
1243 r->flags |= IEEE80211_TX_RC_DUP_DATA;
1244 if (rate_n_flags & RATE_MCS_SGI_MSK)
1245 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1246 r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
1247}
1248
/**
 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 *
 * Updates the per-frame ACK record from the BA bitmap, reclaims every
 * TFD in front of the BA window, wakes the queue when enough space was
 * freed, and lets iwl4965_txq_check_empty() finish any deferred
 * ADDBA/DELBA transition.
 */
void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
					struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now!
		 * since it is possible happen very often and in order
		 * not to fill the syslog, don't enable the logging by default
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		return;
	}

	/* Find index just before block-ack window */
	index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&priv->sta_lock, flags);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			"sta_id = %d\n",
			agg->wait_for_ba,
			(u8 *) &ba_resp->sta_addr_lo32,
			ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
			"scd_flow = "
			"%d, scd_ssn = %d\n",
			ba_resp->tid,
			ba_resp->seq_ctl,
			(unsigned long long)le64_to_cpu(ba_resp->bitmap),
			ba_resp->scd_flow,
			ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			agg->start_idx,
			(unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
		iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);

		/* don't wake the queue while a DELBA drain is in progress */
		if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
			priv->mac80211_registered &&
			(agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_legacy_wake_queue(priv, txq);

		iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}
1340
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/**
 * iwl4965_get_tx_fail_reason - decode a Tx status word to a string
 *
 * Maps the TX_STATUS_MSK portion of @status to a human-readable name
 * for debug output: "SUCCESS", the POSTPONE_/FAIL_ suffix of the
 * matching code, or "UNKNOWN" for unrecognised values.
 */
const char *iwl4965_get_tx_fail_reason(u32 status)
{
/* local helper macros: expand to "case <code>: return <name>;" */
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
deleted file mode 100644
index 001d148feb94..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
+++ /dev/null
@@ -1,166 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-4965-calib.h"
42
43#define IWL_AC_UNSET -1
44
/**
 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
 * using sample data 100 bytes apart. If these sample points are good,
 * it's a pretty good bet that everything between them is good, too.
 *
 * Returns 0 if every sampled word matches, -EIO otherwise (gives up
 * after the third mismatch).
 */
static int
iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
	u32 val;
	int ret = 0;
	u32 errcnt = 0;
	u32 i;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			i + IWL4965_RTC_INST_LOWER_BOUND);
		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			ret = -EIO;
			errcnt++;
			if (errcnt >= 3)
				break;
		}
	}

	return ret;
}
77
/**
 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
 * looking at all data.
 *
 * Exhaustive companion to iwl4965_verify_inst_sparse(): compares every
 * word of @image against instruction SRAM, logging each mismatch.
 * Returns 0 on a full match, -EIO otherwise (stops logging after 20
 * mismatches).
 */
static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
				 u32 len)
{
	u32 val;
	u32 save_len = len;
	int ret = 0;
	u32 errcnt;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			   IWL4965_RTC_INST_LOWER_BOUND);

	errcnt = 0;
	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "uCode INST section is invalid at "
				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
				  save_len - len, val, le32_to_cpu(*image));
			ret = -EIO;
			errcnt++;
			if (errcnt >= 20)
				break;
		}
	}

	if (!errcnt)
		IWL_DEBUG_INFO(priv,
			"ucode image in INSTRUCTION memory is good\n");

	return ret;
}
118
/**
 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
 * and verify its contents
 *
 * Tries the bootstrap, "initialize" and runtime images in turn with
 * the fast sparse check; the first match wins. If none match, runs the
 * full word-by-word check against the bootstrap image purely to emit
 * diagnostic output. Returns 0 when any image matched, otherwise the
 * error from the full check.
 */
int iwl4965_verify_ucode(struct iwl_priv *priv)
{
	__le32 *image;
	u32 len;
	int ret;

	/* Try bootstrap */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try initialize */
	image = (__le32 *)priv->ucode_init.v_addr;
	len = priv->ucode_init.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try runtime/protocol */
	image = (__le32 *)priv->ucode_code.v_addr;
	len = priv->ucode_code.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

	/* Since nothing seems to match, show first several data entries in
	 * instruction SRAM, so maybe visual inspection will give a clue.
	 * Selection of bootstrap image (vs. other images) is arbitrary. */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl4965_verify_inst_full(priv, image, len);

	return ret;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
deleted file mode 100644
index 86f4fce193e4..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ /dev/null
@@ -1,2183 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/sched.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-eeprom.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-helpers.h"
45#include "iwl-4965-calib.h"
46#include "iwl-sta.h"
47#include "iwl-4965-led.h"
48#include "iwl-4965.h"
49#include "iwl-4965-debugfs.h"
50
51static int iwl4965_send_tx_power(struct iwl_priv *priv);
52static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
53
54/* Highest firmware API version supported */
55#define IWL4965_UCODE_API_MAX 2
56
57/* Lowest firmware API version supported */
58#define IWL4965_UCODE_API_MIN 2
59
60#define IWL4965_FW_PRE "iwlwifi-4965-"
61#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
62#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
63
/* check contents of special bootstrap uCode SRAM:
 * compare every word of BSM SRAM against the host's bootstrap image.
 * Returns 0 on a full match, -EIO on the first mismatch. */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO(priv, "Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this read of BSM_WR_DWCOUNT_REG is immediately
	 * overwritten inside the loop -- a dead store unless the read
	 * itself has a hardware side effect; confirm before removing. */
	val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_legacy_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "BSM uCode verification failed at "
				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				  BSM_SRAM_LOWER_BOUND,
				  reg - BSM_SRAM_LOWER_BOUND, len,
				  val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");

	return 0;
}
94
/**
 * iwl4965_load_bsm - Load bootstrap instructions
 *
 * BSM operation:
 *
 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
 * in special SRAM that does not power down during RFKILL. When powering back
 * up after power-saving sleeps (or during initial uCode load), the BSM loads
 * the bootstrap program into the on-board processor, and starts it.
 *
 * The bootstrap program loads (via DMA) instructions and data for a new
 * program from host DRAM locations indicated by the host driver in the
 * BSM_DRAM_* registers. Once the new program is loaded, it starts
 * automatically.
 *
 * When initializing the NIC, the host driver points the BSM to the
 * "initialize" uCode image. This uCode sets up some internal data, then
 * notifies host via "initialize alive" that it is complete.
 *
 * The host then replaces the BSM_DRAM_* pointer values to point to the
 * normal runtime uCode instructions and a backup uCode data cache buffer
 * (filled initially with starting data values for the on-board processor),
 * then triggers the "initialize" uCode to load and launch the runtime uCode,
 * which begins normal operation.
 *
 * When doing a power-save shutdown, runtime uCode saves data SRAM into
 * the backup data cache in DRAM before SRAM is powered down.
 *
 * When powering back up, the BSM loads the bootstrap program. This reloads
 * the runtime uCode instructions and the backup data cache into SRAM,
 * and re-launches the runtime uCode from where it left off.
 *
 * Returns 0 on success, -EINVAL if the bootstrap image exceeds BSM
 * SRAM, -EIO on verification or load-completion failure.
 */
static int iwl4965_load_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	IWL_DEBUG_INFO(priv, "Begin load bsm\n");

	priv->ucode_type = UCODE_RT;

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IWL49_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE: iwl_init_alive_start() will replace these values,
	 * after the "initialize" uCode has run, to point to
	 * runtime/protocol instructions and backup data cache.
	 */
	pinst = priv->ucode_init.p_addr >> 4;
	pdata = priv->ucode_init_data.p_addr >> 4;
	inst_len = priv->ucode_init.len;
	data_len = priv->ucode_init_data.len;

	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));

	/* Read the copy back and compare it against the host image */
	ret = iwl4965_verify_bsm(priv);
	if (ret)
		return ret;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_legacy_write_prph(priv,
			BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 * to prepare to load "initialize" uCode */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish: the START bit
	 * self-clears when the copy completes (poll up to 100 x 10us) */
	for (i = 0; i < 100; i++) {
		done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
	else {
		IWL_ERR(priv, "BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 * (e.g. when powering back up after power-save shutdown) */
	iwl_legacy_write_prph(priv,
			BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);


	return 0;
}
206
207/**
208 * iwl4965_set_ucode_ptrs - Set uCode address location
209 *
210 * Tell initialization uCode where to find runtime uCode.
211 *
212 * BSM registers initially contain pointers to initialization uCode.
213 * We need to replace them to load runtime uCode inst and data,
214 * and to save runtime data when powering down.
215 */
216static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
217{
218 dma_addr_t pinst;
219 dma_addr_t pdata;
220 int ret = 0;
221
222 /* bits 35:4 for 4965 */
223 pinst = priv->ucode_code.p_addr >> 4;
224 pdata = priv->ucode_data_backup.p_addr >> 4;
225
226 /* Tell bootstrap uCode where to find image to load */
227 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
228 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
229 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
230 priv->ucode_data.len);
231
232 /* Inst byte count must be last to set up, bit 31 signals uCode
233 * that all new ptr/size info is in place */
234 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
235 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
236 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
237
238 return ret;
239}
240
241/**
242 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
243 *
244 * Called after REPLY_ALIVE notification received from "initialize" uCode.
245 *
246 * The 4965 "initialize" ALIVE reply contains calibration data for:
247 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
248 * (3945 does not contain this data).
249 *
250 * Tell "initialize" uCode to go ahead and load the runtime uCode.
251*/
252static void iwl4965_init_alive_start(struct iwl_priv *priv)
253{
254 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
255 * This is a paranoid check, because we would not have gotten the
256 * "initialize" alive if code weren't properly loaded. */
257 if (iwl4965_verify_ucode(priv)) {
258 /* Runtime instruction load was bad;
259 * take it all the way back down so we can try again */
260 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
261 goto restart;
262 }
263
264 /* Calculate temperature */
265 priv->temperature = iwl4965_hw_get_temperature(priv);
266
267 /* Send pointers to protocol/runtime uCode image ... init code will
268 * load and launch runtime uCode, which will send us another "Alive"
269 * notification. */
270 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
271 if (iwl4965_set_ucode_ptrs(priv)) {
272 /* Runtime instruction load won't happen;
273 * take it all the way back down so we can try again */
274 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
275 goto restart;
276 }
277 return;
278
279restart:
280 queue_work(priv->workqueue, &priv->restart);
281}
282
283static bool iw4965_is_ht40_channel(__le32 rxon_flags)
284{
285 int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
286 >> RXON_FLG_CHANNEL_MODE_POS;
287 return ((chan_mod == CHANNEL_MODE_PURE_40) ||
288 (chan_mod == CHANNEL_MODE_MIXED));
289}
290
/* Program radio configuration bits from EEPROM into CSR_HW_IF_CONFIG_REG
 * and cache the EEPROM txpower calibration block pointer in priv.
 * Runs under priv->lock with interrupts disabled. */
static void iwl4965_nic_config(struct iwl_priv *priv)
{
	unsigned long flags;
	u16 radio_cfg;

	spin_lock_irqsave(&priv->lock, flags);

	radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	/* NOTE(review): radio config bits are written only when the RF type
	 * equals EEPROM_4965_RF_CFG_TYPE_MAX — confirm this guard is the
	 * intended "valid type" check and not an off-by-one. */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
			CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	/* Cache pointer to the txpower calibration section of the EEPROM
	 * image; used later by iwl4965_fill_txpower_tbl(). */
	priv->calib_info = (struct iwl_eeprom_calib_info *)
		iwl_legacy_eeprom_query_addr(priv,
				EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&priv->lock, flags);
}
318
319/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
320 * Called after every association, but this runs only once!
321 * ... once chain noise is calibrated the first time, it's good forever. */
322static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
323{
324 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
325
326 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
327 iwl_legacy_is_any_associated(priv)) {
328 struct iwl_calib_diff_gain_cmd cmd;
329
330 /* clear data for chain noise calibration algorithm */
331 data->chain_noise_a = 0;
332 data->chain_noise_b = 0;
333 data->chain_noise_c = 0;
334 data->chain_signal_a = 0;
335 data->chain_signal_b = 0;
336 data->chain_signal_c = 0;
337 data->beacon_count = 0;
338
339 memset(&cmd, 0, sizeof(cmd));
340 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
341 cmd.diff_gain_a = 0;
342 cmd.diff_gain_b = 0;
343 cmd.diff_gain_c = 0;
344 if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
345 sizeof(cmd), &cmd))
346 IWL_ERR(priv,
347 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
348 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
349 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
350 }
351}
352
/* Default sensitivity / auto-correlation thresholds for the 4965,
 * installed into priv->hw_params.sens by iwl4965_hw_set_hw_params().
 * Values bound the runtime sensitivity calibration algorithm. */
static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0, /* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
379
/* Set the critical-temperature (CT) kill threshold in hw_params.
 * The legacy threshold constant is in Celsius; hardware wants Kelvin. */
static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
{
	/* want Kelvin */
	priv->hw_params.ct_kill_threshold =
		CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
}
386
387/**
388 * iwl4965_hw_set_hw_params
389 *
390 * Called when initializing driver
391 */
392static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
393{
394 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
395 priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
396 priv->cfg->base_params->num_of_queues =
397 priv->cfg->mod_params->num_of_queues;
398
399 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
400 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
401 priv->hw_params.scd_bc_tbls_size =
402 priv->cfg->base_params->num_of_queues *
403 sizeof(struct iwl4965_scd_bc_tbl);
404 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
405 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
406 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
407 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
408 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
409 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
410 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
411
412 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
413
414 priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
415 priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
416 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
417 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
418
419 iwl4965_set_ct_threshold(priv);
420
421 priv->hw_params.sens = &iwl4965_sensitivity;
422 priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
423
424 return 0;
425}
426
/**
 * iwl4965_math_div_round - signed division rounded to the nearest integer
 * @num:   numerator
 * @denom: denominator (must be non-zero; no zero check is done here)
 * @res:   output; receives num/denom rounded to nearest, computed on
 *         magnitudes so rounding is symmetric around zero (half rounds
 *         away from zero: 7/2 -> 4, -7/2 -> -4)
 *
 * Returns 1 unconditionally; callers ignore the return value.
 */
static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
{
	s32 sign = 1;

	/* strip signs, remember combined sign */
	if (num < 0) {
		sign = -sign;
		num = -num;
	}
	if (denom < 0) {
		sign = -sign;
		denom = -denom;
	}
	/* round-half-up on the magnitude, then restore the sign.
	 * (A dead "*res = 1;" store that this line immediately
	 * overwrote has been removed.) */
	*res = ((num * 2 + denom) / (denom * 2)) * sign;

	return 1;
}
444
445/**
446 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
447 *
448 * Determines power supply voltage compensation for txpower calculations.
449 * Returns number of 1/2-dB steps to subtract from gain table index,
450 * to compensate for difference between power supply voltage during
451 * factory measurements, vs. current power supply voltage.
452 *
453 * Voltage indication is higher for lower voltage.
454 * Lower voltage requires more gain (lower gain table index).
455 */
456static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
457 s32 current_voltage)
458{
459 s32 comp = 0;
460
461 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
462 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
463 return 0;
464
465 iwl4965_math_div_round(current_voltage - eeprom_voltage,
466 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
467
468 if (current_voltage > eeprom_voltage)
469 comp *= 2;
470 if ((comp < -2) || (comp > 2))
471 comp = 0;
472
473 return comp;
474}
475
476static s32 iwl4965_get_tx_atten_grp(u16 channel)
477{
478 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
479 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
480 return CALIB_CH_GROUP_5;
481
482 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
483 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
484 return CALIB_CH_GROUP_1;
485
486 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
487 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
488 return CALIB_CH_GROUP_2;
489
490 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
491 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
492 return CALIB_CH_GROUP_3;
493
494 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
495 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
496 return CALIB_CH_GROUP_4;
497
498 return -EINVAL;
499}
500
501static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
502{
503 s32 b = -1;
504
505 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
506 if (priv->calib_info->band_info[b].ch_from == 0)
507 continue;
508
509 if ((channel >= priv->calib_info->band_info[b].ch_from)
510 && (channel <= priv->calib_info->band_info[b].ch_to))
511 break;
512 }
513
514 return b;
515}
516
517static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
518{
519 s32 val;
520
521 if (x2 == x1)
522 return y1;
523 else {
524 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
525 return val + y2;
526 }
527}
528
529/**
530 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
531 *
532 * Interpolates factory measurements from the two sample channels within a
533 * sub-band, to apply to channel of interest. Interpolation is proportional to
534 * differences in channel frequencies, which is proportional to differences
535 * in channel number.
536 */
537static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
538 struct iwl_eeprom_calib_ch_info *chan_info)
539{
540 s32 s = -1;
541 u32 c;
542 u32 m;
543 const struct iwl_eeprom_calib_measure *m1;
544 const struct iwl_eeprom_calib_measure *m2;
545 struct iwl_eeprom_calib_measure *omeas;
546 u32 ch_i1;
547 u32 ch_i2;
548
549 s = iwl4965_get_sub_band(priv, channel);
550 if (s >= EEPROM_TX_POWER_BANDS) {
551 IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
552 return -1;
553 }
554
555 ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
556 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
557 chan_info->ch_num = (u8) channel;
558
559 IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
560 channel, s, ch_i1, ch_i2);
561
562 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
563 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
564 m1 = &(priv->calib_info->band_info[s].ch1.
565 measurements[c][m]);
566 m2 = &(priv->calib_info->band_info[s].ch2.
567 measurements[c][m]);
568 omeas = &(chan_info->measurements[c][m]);
569
570 omeas->actual_pow =
571 (u8) iwl4965_interpolate_value(channel, ch_i1,
572 m1->actual_pow,
573 ch_i2,
574 m2->actual_pow);
575 omeas->gain_idx =
576 (u8) iwl4965_interpolate_value(channel, ch_i1,
577 m1->gain_idx, ch_i2,
578 m2->gain_idx);
579 omeas->temperature =
580 (u8) iwl4965_interpolate_value(channel, ch_i1,
581 m1->temperature,
582 ch_i2,
583 m2->temperature);
584 omeas->pa_det =
585 (s8) iwl4965_interpolate_value(channel, ch_i1,
586 m1->pa_det, ch_i2,
587 m2->pa_det);
588
589 IWL_DEBUG_TXPOWER(priv,
590 "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
591 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
592 IWL_DEBUG_TXPOWER(priv,
593 "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
594 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
595 IWL_DEBUG_TXPOWER(priv,
596 "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
597 m1->pa_det, m2->pa_det, omeas->pa_det);
598 IWL_DEBUG_TXPOWER(priv,
599 "chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
600 m1->temperature, m2->temperature,
601 omeas->temperature);
602 }
603 }
604
605 return 0;
606}
607
/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
 * Indexed by rate entry in the power table; subtracted from the
 * saturation power in iwl4965_fill_txpower_tbl(). */
static s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};
617
/* Thermal compensation values for txpower for various frequency ranges ...
 *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust.
 * Indexed by CALIB_CH_GROUP_* (see iwl4965_get_tx_atten_grp()); the pair
 * forms the fraction degrees_per_05db_a / degrees_per_05db_a_denom. */
static struct iwl4965_txpower_comp_entry {
	s32 degrees_per_05db_a;
	s32 degrees_per_05db_a_denom;
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{9, 2},			/* group 0 5.2, ch 34-43 */
	{4, 1},			/* group 1 5.2, ch 44-70 */
	{4, 1},			/* group 2 5.2, ch 71-124 */
	{4, 1},			/* group 3 5.2, ch 125-200 */
	{3, 1}			/* group 4 2.4, ch all */
};
630
631static s32 get_min_power_index(s32 rate_power_index, u32 band)
632{
633 if (!band) {
634 if ((rate_power_index & 7) <= 4)
635 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
636 }
637 return MIN_TX_GAIN_INDEX;
638}
639
/* One step of the txpower gain table: the DSP predistortion/attenuation
 * code and the radio Tx gain code loaded into the txpower command
 * (see iwl4965_fill_txpower_tbl()). */
struct gain_entry {
	u8 dsp;
	u8 radio;
};
644
/* Per-band txpower gain tables, indexed [band][power_index] where
 * band 0 = 5.2 GHz, band 1 = 2.4 GHz, and power index 0 is the highest
 * txpower.  iwl4965_fill_txpower_tbl() clamps the index to [0, 107]
 * before indexing here. */
static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain index table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain index table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};
869
870static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
871 u8 is_ht40, u8 ctrl_chan_high,
872 struct iwl4965_tx_power_db *tx_power_tbl)
873{
874 u8 saturation_power;
875 s32 target_power;
876 s32 user_target_power;
877 s32 power_limit;
878 s32 current_temp;
879 s32 reg_limit;
880 s32 current_regulatory;
881 s32 txatten_grp = CALIB_CH_GROUP_MAX;
882 int i;
883 int c;
884 const struct iwl_channel_info *ch_info = NULL;
885 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
886 const struct iwl_eeprom_calib_measure *measurement;
887 s16 voltage;
888 s32 init_voltage;
889 s32 voltage_compensation;
890 s32 degrees_per_05db_num;
891 s32 degrees_per_05db_denom;
892 s32 factory_temp;
893 s32 temperature_comp[2];
894 s32 factory_gain_index[2];
895 s32 factory_actual_pwr[2];
896 s32 power_index;
897
898 /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
899 * are used for indexing into txpower table) */
900 user_target_power = 2 * priv->tx_power_user_lmt;
901
902 /* Get current (RXON) channel, band, width */
903 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
904 is_ht40);
905
906 ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
907
908 if (!iwl_legacy_is_channel_valid(ch_info))
909 return -EINVAL;
910
911 /* get txatten group, used to select 1) thermal txpower adjustment
912 * and 2) mimo txpower balance between Tx chains. */
913 txatten_grp = iwl4965_get_tx_atten_grp(channel);
914 if (txatten_grp < 0) {
915 IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
916 channel);
917 return txatten_grp;
918 }
919
920 IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
921 channel, txatten_grp);
922
923 if (is_ht40) {
924 if (ctrl_chan_high)
925 channel -= 2;
926 else
927 channel += 2;
928 }
929
930 /* hardware txpower limits ...
931 * saturation (clipping distortion) txpowers are in half-dBm */
932 if (band)
933 saturation_power = priv->calib_info->saturation_power24;
934 else
935 saturation_power = priv->calib_info->saturation_power52;
936
937 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
938 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
939 if (band)
940 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
941 else
942 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
943 }
944
945 /* regulatory txpower limits ... reg_limit values are in half-dBm,
946 * max_power_avg values are in dBm, convert * 2 */
947 if (is_ht40)
948 reg_limit = ch_info->ht40_max_power_avg * 2;
949 else
950 reg_limit = ch_info->max_power_avg * 2;
951
952 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
953 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
954 if (band)
955 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
956 else
957 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
958 }
959
960 /* Interpolate txpower calibration values for this channel,
961 * based on factory calibration tests on spaced channels. */
962 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
963
964 /* calculate tx gain adjustment based on power supply voltage */
965 voltage = le16_to_cpu(priv->calib_info->voltage);
966 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
967 voltage_compensation =
968 iwl4965_get_voltage_compensation(voltage, init_voltage);
969
970 IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
971 init_voltage,
972 voltage, voltage_compensation);
973
974 /* get current temperature (Celsius) */
975 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
976 current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
977 current_temp = KELVIN_TO_CELSIUS(current_temp);
978
979 /* select thermal txpower adjustment params, based on channel group
980 * (same frequency group used for mimo txatten adjustment) */
981 degrees_per_05db_num =
982 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
983 degrees_per_05db_denom =
984 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
985
986 /* get per-chain txpower values from factory measurements */
987 for (c = 0; c < 2; c++) {
988 measurement = &ch_eeprom_info.measurements[c][1];
989
990 /* txgain adjustment (in half-dB steps) based on difference
991 * between factory and current temperature */
992 factory_temp = measurement->temperature;
993 iwl4965_math_div_round((current_temp - factory_temp) *
994 degrees_per_05db_denom,
995 degrees_per_05db_num,
996 &temperature_comp[c]);
997
998 factory_gain_index[c] = measurement->gain_idx;
999 factory_actual_pwr[c] = measurement->actual_pow;
1000
1001 IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
1002 IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
1003 "curr tmp %d, comp %d steps\n",
1004 factory_temp, current_temp,
1005 temperature_comp[c]);
1006
1007 IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
1008 factory_gain_index[c],
1009 factory_actual_pwr[c]);
1010 }
1011
1012 /* for each of 33 bit-rates (including 1 for CCK) */
1013 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
1014 u8 is_mimo_rate;
1015 union iwl4965_tx_power_dual_stream tx_power;
1016
1017 /* for mimo, reduce each chain's txpower by half
1018 * (3dB, 6 steps), so total output power is regulatory
1019 * compliant. */
1020 if (i & 0x8) {
1021 current_regulatory = reg_limit -
1022 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1023 is_mimo_rate = 1;
1024 } else {
1025 current_regulatory = reg_limit;
1026 is_mimo_rate = 0;
1027 }
1028
1029 /* find txpower limit, either hardware or regulatory */
1030 power_limit = saturation_power - back_off_table[i];
1031 if (power_limit > current_regulatory)
1032 power_limit = current_regulatory;
1033
1034 /* reduce user's txpower request if necessary
1035 * for this rate on this channel */
1036 target_power = user_target_power;
1037 if (target_power > power_limit)
1038 target_power = power_limit;
1039
1040 IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
1041 i, saturation_power - back_off_table[i],
1042 current_regulatory, user_target_power,
1043 target_power);
1044
1045 /* for each of 2 Tx chains (radio transmitters) */
1046 for (c = 0; c < 2; c++) {
1047 s32 atten_value;
1048
1049 if (is_mimo_rate)
1050 atten_value =
1051 (s32)le32_to_cpu(priv->card_alive_init.
1052 tx_atten[txatten_grp][c]);
1053 else
1054 atten_value = 0;
1055
1056 /* calculate index; higher index means lower txpower */
1057 power_index = (u8) (factory_gain_index[c] -
1058 (target_power -
1059 factory_actual_pwr[c]) -
1060 temperature_comp[c] -
1061 voltage_compensation +
1062 atten_value);
1063
1064/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
1065 power_index); */
1066
1067 if (power_index < get_min_power_index(i, band))
1068 power_index = get_min_power_index(i, band);
1069
1070 /* adjust 5 GHz index to support negative indexes */
1071 if (!band)
1072 power_index += 9;
1073
1074 /* CCK, rate 32, reduce txpower for CCK */
1075 if (i == POWER_TABLE_CCK_ENTRY)
1076 power_index +=
1077 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
1078
1079 /* stay within the table! */
1080 if (power_index > 107) {
1081 IWL_WARN(priv, "txpower index %d > 107\n",
1082 power_index);
1083 power_index = 107;
1084 }
1085 if (power_index < 0) {
1086 IWL_WARN(priv, "txpower index %d < 0\n",
1087 power_index);
1088 power_index = 0;
1089 }
1090
1091 /* fill txpower command for this rate/chain */
1092 tx_power.s.radio_tx_gain[c] =
1093 gain_table[band][power_index].radio;
1094 tx_power.s.dsp_predis_atten[c] =
1095 gain_table[band][power_index].dsp;
1096
1097 IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
1098 "gain 0x%02x dsp %d\n",
1099 c, atten_value, power_index,
1100 tx_power.s.radio_tx_gain[c],
1101 tx_power.s.dsp_predis_atten[c]);
1102 } /* for each chain */
1103
1104 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1105
1106 } /* for each rate */
1107
1108 return 0;
1109}
1110
1111/**
1112 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
1113 *
1114 * Uses the active RXON for channel, band, and characteristics (ht40, high)
1115 * The power limit is taken from priv->tx_power_user_lmt.
1116 */
1117static int iwl4965_send_tx_power(struct iwl_priv *priv)
1118{
1119 struct iwl4965_txpowertable_cmd cmd = { 0 };
1120 int ret;
1121 u8 band = 0;
1122 bool is_ht40 = false;
1123 u8 ctrl_chan_high = 0;
1124 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1125
1126 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1127 "TX Power requested while scanning!\n"))
1128 return -EAGAIN;
1129
1130 band = priv->band == IEEE80211_BAND_2GHZ;
1131
1132 is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
1133
1134 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1135 ctrl_chan_high = 1;
1136
1137 cmd.band = band;
1138 cmd.channel = ctx->active.channel;
1139
1140 ret = iwl4965_fill_txpower_tbl(priv, band,
1141 le16_to_cpu(ctx->active.channel),
1142 is_ht40, ctrl_chan_high, &cmd.tx_power);
1143 if (ret)
1144 goto out;
1145
1146 ret = iwl_legacy_send_cmd_pdu(priv,
1147 REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1148
1149out:
1150 return ret;
1151}
1152
1153static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1154 struct iwl_rxon_context *ctx)
1155{
1156 int ret = 0;
1157 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1158 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1159 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1160
1161 if ((rxon1->flags == rxon2->flags) &&
1162 (rxon1->filter_flags == rxon2->filter_flags) &&
1163 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1164 (rxon1->ofdm_ht_single_stream_basic_rates ==
1165 rxon2->ofdm_ht_single_stream_basic_rates) &&
1166 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1167 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1168 (rxon1->rx_chain == rxon2->rx_chain) &&
1169 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1170 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1171 return 0;
1172 }
1173
1174 rxon_assoc.flags = ctx->staging.flags;
1175 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1176 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1177 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1178 rxon_assoc.reserved = 0;
1179 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1180 ctx->staging.ofdm_ht_single_stream_basic_rates;
1181 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1182 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1183 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
1184
1185 ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1186 sizeof(rxon_assoc), &rxon_assoc, NULL);
1187
1188 return ret;
1189}
1190
/* Commit the staging RXON configuration to the uCode.  Uses the
 * lightweight RXON_ASSOC path when no full RXON is required; otherwise
 * sends full RXON command(s), restoring stations/WEP keys afterwards
 * (a full unassoc RXON clears the uCode station table), then
 * re-initializes sensitivity and re-sends txpower.  Ordering of the
 * steps below is significant. */
static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
	int ret;
	bool new_assoc =
		!!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);

	if (!iwl_legacy_is_alive(priv))
		return -EBUSY;

	if (!ctx->is_active)
		return 0;

	/* always get timestamp with Rx frame */
	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;

	ret = iwl_legacy_check_rxon_cmd(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/*
	 * receive commit_rxon request
	 * abort any previous channel switch if still in process
	 */
	if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
	    (priv->switch_channel != ctx->staging.channel)) {
		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
			      le16_to_cpu(priv->switch_channel));
		iwl_legacy_chswitch_done(priv, false);
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl_legacy_full_rxon_required(priv, ctx)) {
		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		iwl_legacy_print_rx_config_cmd(priv, ctx);
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if tx power changed.
		 */
		iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
				      sizeof(struct iwl_legacy_rxon_cmd),
				      active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (ret) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
			return ret;
		}
		/* unassoc RXON wiped the uCode station table — rebuild it */
		iwl_legacy_clear_ucode_stations(priv, ctx);
		iwl_legacy_restore_stations(priv, ctx);
		ret = iwl4965_restore_default_wep_keys(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}

	IWL_DEBUG_INFO(priv, "Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(ctx->staging.channel),
		       ctx->staging.bssid_addr);

	iwl_legacy_set_rxon_hwcrypto(priv, ctx,
				!priv->cfg->mod_params->sw_crypto);

	/* Apply the new configuration
	 * RXON unassoc clears the station table in uCode so restoration of
	 * stations is needed after it (the RXON command) completes
	 */
	if (!new_assoc) {
		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
			      sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		iwl_legacy_clear_ucode_stations(priv, ctx);
		iwl_legacy_restore_stations(priv, ctx);
		ret = iwl4965_restore_default_wep_keys(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}
	if (new_assoc) {
		priv->start_calib = 0;
		/* Apply the new configuration
		 * RXON assoc doesn't clear the station table in uCode,
		 */
		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
			      sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
	}
	iwl_legacy_print_rx_config_cmd(priv, ctx);

	iwl4965_init_sensitivity(priv);

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}
1332
/**
 * iwl4965_hw_channel_switch - build and send a REPLY_CHANNEL_SWITCH command
 * @priv: driver private data
 * @ch_switch: channel-switch parameters announced by the AP (target channel,
 *	beacon countdown and TSF timestamp of the announcement)
 *
 * Translates the mac80211 channel-switch request into the 4965 uCode
 * command, computing the uCode switch time from the beacon countdown,
 * the device beacon timestamp and the beacon interval.
 *
 * Returns 0 on success, -EFAULT for an invalid target channel, or the
 * error from building the tx-power table / sending the host command.
 */
static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int rc;
	u8 band = 0;
	bool is_ht40 = false;
	u8 ctrl_chan_high = 0;
	struct iwl4965_channel_switch_cmd cmd;
	const struct iwl_channel_info *ch_info;
	u32 switch_time_in_usec, ucode_switch_time;
	u16 ch;
	u32 tsf_low;
	u8 switch_count;
	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
	struct ieee80211_vif *vif = ctx->vif;
	/* cmd.band is a flag: 1 = 2.4 GHz, 0 = 5 GHz */
	band = priv->band == IEEE80211_BAND_2GHZ;

	is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);

	/* For HT40+ the control channel sits below the extension channel */
	if (is_ht40 &&
	    (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
		ctrl_chan_high = 1;

	cmd.band = band;
	cmd.expect_beacon = 0;
	ch = ch_switch->channel->hw_value;
	cmd.channel = cpu_to_le16(ch);
	/* Carry the current RXON config over to the new channel */
	cmd.rxon_flags = ctx->staging.flags;
	cmd.rxon_filter_flags = ctx->staging.filter_flags;
	switch_count = ch_switch->count;
	/* Only the low 32 bits of the TSF matter for the countdown math */
	tsf_low = ch_switch->timestamp & 0x0ffffffff;
	/*
	 * calculate the ucode channel switch time
	 * adding TSF as one of the factor for when to switch
	 */
	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
		/* Beacons already elapsed since the announcement reduce
		 * the remaining countdown; clamp at zero. */
		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
		    beacon_interval)) {
			switch_count -= (priv->ucode_beacon_time -
				tsf_low) / beacon_interval;
		} else
			switch_count = 0;
	}
	if (switch_count <= 1)
		/* Switch at the next beacon */
		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
	else {
		/* Convert the remaining beacon count into uCode beacon
		 * time units and add it to the current device time. */
		switch_time_in_usec =
			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
		ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
						 switch_time_in_usec,
						 beacon_interval);
		cmd.switch_time = iwl_legacy_add_beacon_time(priv,
					 priv->ucode_beacon_time,
						 ucode_switch_time,
						 beacon_interval);
	}
	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
		      cmd.switch_time);
	ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
	if (ch_info)
		/* On radar channels the uCode must see a beacon before Tx */
		cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
	else {
		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
			ctx->active.channel, ch);
		return -EFAULT;
	}

	rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
				      ctrl_chan_high, &cmd.tx_power);
	if (rc) {
		IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
		return rc;
	}

	return iwl_legacy_send_cmd_pdu(priv,
			 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
1411
1412/**
1413 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1414 */
1415static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
1416 struct iwl_tx_queue *txq,
1417 u16 byte_cnt)
1418{
1419 struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
1420 int txq_id = txq->q.id;
1421 int write_ptr = txq->q.write_ptr;
1422 int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1423 __le16 bc_ent;
1424
1425 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
1426
1427 bc_ent = cpu_to_le16(len & 0xFFF);
1428 /* Set up byte count within first 256 entries */
1429 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1430
1431 /* If within first 64 entries, duplicate at end */
1432 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1433 scd_bc_tbl[txq_id].
1434 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
1435}
1436
1437/**
1438 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
1439 * @statistics: Provides the temperature reading from the uCode
1440 *
1441 * A return of <0 indicates bogus data in the statistics
1442 */
1443static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1444{
1445 s32 temperature;
1446 s32 vt;
1447 s32 R1, R2, R3;
1448 u32 R4;
1449
1450 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1451 (priv->_4965.statistics.flag &
1452 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
1453 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
1454 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1455 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1456 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1457 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
1458 } else {
1459 IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
1460 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1461 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1462 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1463 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
1464 }
1465
1466 /*
1467 * Temperature is only 23 bits, so sign extend out to 32.
1468 *
1469 * NOTE If we haven't received a statistics notification yet
1470 * with an updated temperature, use R4 provided to us in the
1471 * "initialize" ALIVE response.
1472 */
1473 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1474 vt = sign_extend32(R4, 23);
1475 else
1476 vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
1477 general.common.temperature), 23);
1478
1479 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
1480
1481 if (R3 == R1) {
1482 IWL_ERR(priv, "Calibration conflict R1 == R3\n");
1483 return -1;
1484 }
1485
1486 /* Calculate temperature in degrees Kelvin, adjust by 97%.
1487 * Add offset to center the adjustment around 0 degrees Centigrade. */
1488 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1489 temperature /= (R3 - R1);
1490 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
1491
1492 IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
1493 temperature, KELVIN_TO_CELSIUS(temperature));
1494
1495 return temperature;
1496}
1497
1498/* Adjust Txpower only if temperature variance is greater than threshold. */
1499#define IWL_TEMPERATURE_THRESHOLD 3
1500
1501/**
1502 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
1503 *
1504 * If the temperature changed has changed sufficiently, then a recalibration
1505 * is needed.
1506 *
1507 * Assumes caller will replace priv->last_temperature once calibration
1508 * executed.
1509 */
1510static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
1511{
1512 int temp_diff;
1513
1514 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
1515 IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
1516 return 0;
1517 }
1518
1519 temp_diff = priv->temperature - priv->last_temperature;
1520
1521 /* get absolute value */
1522 if (temp_diff < 0) {
1523 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
1524 temp_diff = -temp_diff;
1525 } else if (temp_diff == 0)
1526 IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
1527 else
1528 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
1529
1530 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
1531 IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
1532 return 0;
1533 }
1534
1535 IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
1536
1537 return 1;
1538}
1539
/*
 * iwl4965_temperature_calib - refresh driver temperature and kick calibration
 *
 * Reads the calibrated temperature from the hardware statistics, records
 * it in priv->temperature, and schedules the txpower calibration work if
 * the change exceeds the threshold (unless calibration is disabled or a
 * scan is in progress).
 */
static void iwl4965_temperature_calib(struct iwl_priv *priv)
{
	s32 temp;

	temp = iwl4965_hw_get_temperature(priv);
	/* Discard readings outside the valid operating range */
	if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
		return;

	if (priv->temperature != temp) {
		/* priv->temperature == 0 means no prior reading */
		if (priv->temperature)
			IWL_DEBUG_TEMP(priv, "Temperature changed "
				       "from %dC to %dC\n",
				       KELVIN_TO_CELSIUS(priv->temperature),
				       KELVIN_TO_CELSIUS(temp));
		else
			IWL_DEBUG_TEMP(priv, "Temperature "
				       "initialized to %dC\n",
				       KELVIN_TO_CELSIUS(temp));
	}

	priv->temperature = temp;
	/* Mark that a valid temperature reading is now available */
	set_bit(STATUS_TEMPERATURE, &priv->status);

	/* Defer txpower recalibration to the workqueue; skip during scans */
	if (!priv->disable_tx_power_cal &&
	     unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
		iwl4965_is_temp_calib_needed(priv))
		queue_work(priv->workqueue, &priv->txpower_work);
}
1568
1569static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1570{
1571 switch (cmd_id) {
1572 case REPLY_RXON:
1573 return (u16) sizeof(struct iwl4965_rxon_cmd);
1574 default:
1575 return len;
1576 }
1577}
1578
1579static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
1580 u8 *data)
1581{
1582 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
1583 addsta->mode = cmd->mode;
1584 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
1585 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
1586 addsta->station_flags = cmd->station_flags;
1587 addsta->station_flags_msk = cmd->station_flags_msk;
1588 addsta->tid_disable_tx = cmd->tid_disable_tx;
1589 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1590 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1591 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
1592 addsta->sleep_tx_count = cmd->sleep_tx_count;
1593 addsta->reserved1 = cpu_to_le16(0);
1594 addsta->reserved2 = cpu_to_le16(0);
1595
1596 return (u16)sizeof(struct iwl4965_addsta_cmd);
1597}
1598
1599static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
1600{
1601 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
1602}
1603
1604/**
1605 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
1606 */
1607static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1608 struct iwl_ht_agg *agg,
1609 struct iwl4965_tx_resp *tx_resp,
1610 int txq_id, u16 start_idx)
1611{
1612 u16 status;
1613 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
1614 struct ieee80211_tx_info *info = NULL;
1615 struct ieee80211_hdr *hdr = NULL;
1616 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
1617 int i, sh, idx;
1618 u16 seq;
1619 if (agg->wait_for_ba)
1620 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
1621
1622 agg->frame_count = tx_resp->frame_count;
1623 agg->start_idx = start_idx;
1624 agg->rate_n_flags = rate_n_flags;
1625 agg->bitmap = 0;
1626
1627 /* num frames attempted by Tx command */
1628 if (agg->frame_count == 1) {
1629 /* Only one frame was attempted; no block-ack will arrive */
1630 status = le16_to_cpu(frame_status[0].status);
1631 idx = start_idx;
1632
1633 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
1634 agg->frame_count, agg->start_idx, idx);
1635
1636 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
1637 info->status.rates[0].count = tx_resp->failure_frame + 1;
1638 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1639 info->flags |= iwl4965_tx_status_to_mac80211(status);
1640 iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
1641
1642 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
1643 status & 0xff, tx_resp->failure_frame);
1644 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
1645
1646 agg->wait_for_ba = 0;
1647 } else {
1648 /* Two or more frames were attempted; expect block-ack */
1649 u64 bitmap = 0;
1650 int start = agg->start_idx;
1651
1652 /* Construct bit-map of pending frames within Tx window */
1653 for (i = 0; i < agg->frame_count; i++) {
1654 u16 sc;
1655 status = le16_to_cpu(frame_status[i].status);
1656 seq = le16_to_cpu(frame_status[i].sequence);
1657 idx = SEQ_TO_INDEX(seq);
1658 txq_id = SEQ_TO_QUEUE(seq);
1659
1660 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
1661 AGG_TX_STATE_ABORT_MSK))
1662 continue;
1663
1664 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
1665 agg->frame_count, txq_id, idx);
1666
1667 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
1668 if (!hdr) {
1669 IWL_ERR(priv,
1670 "BUG_ON idx doesn't point to valid skb"
1671 " idx=%d, txq_id=%d\n", idx, txq_id);
1672 return -1;
1673 }
1674
1675 sc = le16_to_cpu(hdr->seq_ctrl);
1676 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
1677 IWL_ERR(priv,
1678 "BUG_ON idx doesn't match seq control"
1679 " idx=%d, seq_idx=%d, seq=%d\n",
1680 idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
1681 return -1;
1682 }
1683
1684 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
1685 i, idx, SEQ_TO_SN(sc));
1686
1687 sh = idx - start;
1688 if (sh > 64) {
1689 sh = (start - idx) + 0xff;
1690 bitmap = bitmap << sh;
1691 sh = 0;
1692 start = idx;
1693 } else if (sh < -64)
1694 sh = 0xff - (start - idx);
1695 else if (sh < 0) {
1696 sh = start - idx;
1697 start = idx;
1698 bitmap = bitmap << sh;
1699 sh = 0;
1700 }
1701 bitmap |= 1ULL << sh;
1702 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
1703 start, (unsigned long long)bitmap);
1704 }
1705
1706 agg->bitmap = bitmap;
1707 agg->start_idx = start;
1708 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
1709 agg->frame_count, agg->start_idx,
1710 (unsigned long long)agg->bitmap);
1711
1712 if (bitmap)
1713 agg->wait_for_ba = 1;
1714 }
1715 return 0;
1716}
1717
/*
 * iwl4965_find_station - look up a station table entry by MAC address
 *
 * Returns the station index, or IWL_INVALID_STATION when the address is
 * unknown or the entry is not yet fully active in the uCode.  Broadcast
 * addresses map directly to the BSS context's broadcast station.
 *
 * Takes and releases priv->sta_lock.
 */
static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IWL_INVALID_STATION;
	unsigned long flags;

	/* In IBSS the low entries are reserved; start at IWL_STA_ID */
	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
		start = IWL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;

	spin_lock_irqsave(&priv->sta_lock, flags);
	for (i = start; i < priv->hw_params.max_stations; i++)
		if (priv->stations[i].used &&
		    (!compare_ether_addr(priv->stations[i].sta.sta.addr,
					 addr))) {
			ret = i;
			goto out;
		}

	/* Reached only when no entry matched */
	IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
			      addr, priv->num_stations);

 out:
	/*
	 * It may be possible that more commands interacting with stations
	 * arrive before we completed processing the adding of
	 * station
	 */
	if (ret != IWL_INVALID_STATION &&
	    (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
	     ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
	      (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
		IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
			ret);
		ret = IWL_INVALID_STATION;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}
1760
1761static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1762{
1763 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
1764 return IWL_AP_ID;
1765 } else {
1766 u8 *da = ieee80211_get_DA(hdr);
1767 return iwl4965_find_station(priv, da);
1768 }
1769}
1770
1771/**
1772 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
1773 */
1774static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
1775 struct iwl_rx_mem_buffer *rxb)
1776{
1777 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1778 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1779 int txq_id = SEQ_TO_QUEUE(sequence);
1780 int index = SEQ_TO_INDEX(sequence);
1781 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1782 struct ieee80211_hdr *hdr;
1783 struct ieee80211_tx_info *info;
1784 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
1785 u32 status = le32_to_cpu(tx_resp->u.status);
1786 int uninitialized_var(tid);
1787 int sta_id;
1788 int freed;
1789 u8 *qc = NULL;
1790 unsigned long flags;
1791
1792 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
1793 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
1794 "is out of range [0-%d] %d %d\n", txq_id,
1795 index, txq->q.n_bd, txq->q.write_ptr,
1796 txq->q.read_ptr);
1797 return;
1798 }
1799
1800 txq->time_stamp = jiffies;
1801 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
1802 memset(&info->status, 0, sizeof(info->status));
1803
1804 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
1805 if (ieee80211_is_data_qos(hdr->frame_control)) {
1806 qc = ieee80211_get_qos_ctl(hdr);
1807 tid = qc[0] & 0xf;
1808 }
1809
1810 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
1811 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
1812 IWL_ERR(priv, "Station not known\n");
1813 return;
1814 }
1815
1816 spin_lock_irqsave(&priv->sta_lock, flags);
1817 if (txq->sched_retry) {
1818 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
1819 struct iwl_ht_agg *agg = NULL;
1820 WARN_ON(!qc);
1821
1822 agg = &priv->stations[sta_id].tid[tid].agg;
1823
1824 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
1825
1826 /* check if BAR is needed */
1827 if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
1828 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1829
1830 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
1831 index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
1832 txq->q.n_bd);
1833 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
1834 "%d index %d\n", scd_ssn , index);
1835 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
1836 if (qc)
1837 iwl4965_free_tfds_in_queue(priv, sta_id,
1838 tid, freed);
1839
1840 if (priv->mac80211_registered &&
1841 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
1842 && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1843 iwl_legacy_wake_queue(priv, txq);
1844 }
1845 } else {
1846 info->status.rates[0].count = tx_resp->failure_frame + 1;
1847 info->flags |= iwl4965_tx_status_to_mac80211(status);
1848 iwl4965_hwrate_to_tx_control(priv,
1849 le32_to_cpu(tx_resp->rate_n_flags),
1850 info);
1851
1852 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
1853 "rate_n_flags 0x%x retries %d\n",
1854 txq_id,
1855 iwl4965_get_tx_fail_reason(status), status,
1856 le32_to_cpu(tx_resp->rate_n_flags),
1857 tx_resp->failure_frame);
1858
1859 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
1860 if (qc && likely(sta_id != IWL_INVALID_STATION))
1861 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
1862 else if (sta_id == IWL_INVALID_STATION)
1863 IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
1864
1865 if (priv->mac80211_registered &&
1866 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
1867 iwl_legacy_wake_queue(priv, txq);
1868 }
1869 if (qc && likely(sta_id != IWL_INVALID_STATION))
1870 iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
1871
1872 iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
1873
1874 spin_unlock_irqrestore(&priv->sta_lock, flags);
1875}
1876
/*
 * iwl4965_rx_beacon_notif - handle BEACON_NOTIFICATION from the uCode
 *
 * Logs the beacon Tx status and records whether this device is currently
 * acting as the IBSS beacon manager (used for tx_last_beacon reporting).
 */
static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
	/* Only read when beacon debugging is compiled in */
	u8 rate __maybe_unused =
		iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
		"tsf:0x%.8x%.8x rate:%d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);

	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
1895
/* Set up 4965-specific Rx frame reply handlers; generic handlers for the
 * remaining notification IDs are installed by shared iwlegacy code. */
static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
{
	/* Legacy Rx frames */
	priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
	/* Tx response */
	priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
	/* Beacon Tx status / IBSS manager notification */
	priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
}
1905
/* 4965-specific host-command hooks for building and committing RXON. */
static struct iwl_hcmd_ops iwl4965_hcmd = {
	.rxon_assoc = iwl4965_send_rxon_assoc,
	.commit_rxon = iwl4965_commit_rxon,
	.set_rxon_chain = iwl4965_set_rxon_chain,
};
1911
/*
 * iwl4965_post_scan - commit any RXON changes deferred during a scan
 */
static void iwl4965_post_scan(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	/*
	 * Since setting the RXON may have been deferred while
	 * performing the scan, fire one off if needed
	 */
	if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
		iwl_legacy_commit_rxon(priv, ctx);
}
1923
/*
 * iwl4965_post_associate - finalize device state after mac80211 association
 *
 * Sequence matters: RXON must first be committed unassociated so the
 * timing command is accepted, then re-committed with the ASSOC flag and
 * the final HT/chain/slot/preamble configuration.  Finally restarts the
 * sensitivity/chain-noise calibrations.
 */
static void iwl4965_post_associate(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct ieee80211_vif *vif = ctx->vif;
	struct ieee80211_conf *conf = NULL;
	int ret = 0;

	if (!vif || !priv->is_open)
		return;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* Scanning would conflict with the RXON sequence below */
	iwl_legacy_scan_cancel_timeout(priv, 200);

	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);

	/* Step 1: commit unassociated so the timing command is accepted */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl_legacy_commit_rxon(priv, ctx);

	ret = iwl_legacy_send_rxon_timing(priv, ctx);
	if (ret)
		IWL_WARN(priv, "RXON timing - "
			    "Attempting to continue.\n");

	/* Step 2: build the associated configuration */
	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);

	IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
			vif->bss_conf.aid, vif->bss_conf.beacon_int);

	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	/* Short slot is only meaningful on 2.4 GHz */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	/* Step 3: commit the associated RXON */
	iwl_legacy_commit_rxon(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
			vif->bss_conf.aid, ctx->active.bssid_addr);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		break;
	case NL80211_IFTYPE_ADHOC:
		/* IBSS also beacons after association */
		iwl4965_send_beacon_cmd(priv);
		break;
	default:
		IWL_ERR(priv, "%s Should not be called in %d mode\n",
			  __func__, vif->type);
		break;
	}

	/* the chain noise calibration will enabled PM upon completion
	 * If chain noise has already been run, then we need to enable
	 * power management here */
	if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
		iwl_legacy_power_update_mode(priv, false);

	/* Enable Rx differential gain and sensitivity calibrations */
	iwl4965_chain_noise_reset(priv);
	priv->start_calib = 1;
}
2000
/*
 * iwl4965_config_ap - bring up (or refresh) AP/IBSS beaconing
 *
 * On first bring-up this commits an unassociated RXON (so the timing
 * command is accepted), configures antennas/HT/slot/preamble, sends the
 * beacon template, then re-commits RXON with the ASSOC flag.  On every
 * call the beacon command is (re)sent to pick up beacon changes.
 *
 * Caller must hold priv->mutex.
 */
static void iwl4965_config_ap(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct ieee80211_vif *vif = ctx->vif;
	int ret = 0;

	lockdep_assert_held(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* The following should be done only at AP bring up */
	if (!iwl_legacy_is_associated_ctx(ctx)) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwl_legacy_commit_rxon(priv, ctx);

		/* RXON Timing */
		ret = iwl_legacy_send_rxon_timing(priv, ctx);
		if (ret)
			IWL_WARN(priv, "RXON timing failed - "
					"Attempting to continue.\n");

		/* AP has all antennas */
		priv->chain_noise_data.active_chains =
			priv->hw_params.valid_rx_ant;
		iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

		ctx->staging.assoc_id = 0;

		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |=
				RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &=
				~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* Short slot only applies on the 2.4 GHz band */
		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |=
					RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &=
					~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* need to send beacon cmd before committing assoc RXON! */
		iwl4965_send_beacon_cmd(priv);
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		iwl_legacy_commit_rxon(priv, ctx);
	}
	/* Always resend the beacon so template updates take effect */
	iwl4965_send_beacon_cmd(priv);
}
2057
/* Helpers for sizing/marshalling host commands and scan handling. */
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
	.get_hcmd_size = iwl4965_get_hcmd_size,
	.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
	.request_scan = iwl4965_request_scan,
	.post_scan = iwl4965_post_scan,
};
2064
/* Low-level hardware operations for the 4965: Tx queue/TFD handling,
 * firmware load, power/temperature management, EEPROM layout, debugfs. */
static struct iwl_lib_ops iwl4965_lib = {
	.set_hw_params = iwl4965_hw_set_hw_params,
	/* Tx queue / TFD plumbing */
	.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
	.txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = iwl4965_hw_txq_free_tfd,
	.txq_init = iwl4965_hw_tx_queue_init,
	.rx_handler_setup = iwl4965_rx_handler_setup,
	.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
	.init_alive_start = iwl4965_init_alive_start,
	/* 4965 loads uCode via the bootstrap state machine (BSM) */
	.load_ucode = iwl4965_load_bsm,
	.dump_nic_error_log = iwl4965_dump_nic_error_log,
	.dump_fh = iwl4965_dump_fh,
	.set_channel_switch = iwl4965_hw_channel_switch,
	.apm_ops = {
		.init = iwl_legacy_apm_init,
		.config = iwl4965_nic_config,
	},
	.eeprom_ops = {
		/* EEPROM offsets of the regulatory channel tables */
		.regulatory_bands = {
			EEPROM_REGULATORY_BAND_1_CHANNELS,
			EEPROM_REGULATORY_BAND_2_CHANNELS,
			EEPROM_REGULATORY_BAND_3_CHANNELS,
			EEPROM_REGULATORY_BAND_4_CHANNELS,
			EEPROM_REGULATORY_BAND_5_CHANNELS,
			EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
			EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
		},
		.acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
		.release_semaphore = iwl4965_eeprom_release_semaphore,
	},
	.send_tx_power	= iwl4965_send_tx_power,
	.update_chain_flags = iwl4965_update_chain_flags,
	.temp_ops = {
		.temperature = iwl4965_temperature_calib,
	},
	.debugfs_ops = {
		.rx_stats_read = iwl4965_ucode_rx_stats_read,
		.tx_stats_read = iwl4965_ucode_tx_stats_read,
		.general_stats_read = iwl4965_ucode_general_stats_read,
	},
};
2106
/* mac80211-facing lifecycle hooks shared with the iwlegacy core. */
static const struct iwl_legacy_ops iwl4965_legacy_ops = {
	.post_associate = iwl4965_post_associate,
	.config_ap = iwl4965_config_ap,
	.manage_ibss_station = iwl4965_manage_ibss_station,
	.update_bcast_stations = iwl4965_update_bcast_stations,
};
2113
/* mac80211 callback table: a mix of 4965-specific handlers and generic
 * iwlegacy implementations shared with the 3945. */
struct ieee80211_ops iwl4965_hw_ops = {
	.tx = iwl4965_mac_tx,
	.start = iwl4965_mac_start,
	.stop = iwl4965_mac_stop,
	.add_interface = iwl_legacy_mac_add_interface,
	.remove_interface = iwl_legacy_mac_remove_interface,
	.change_interface = iwl_legacy_mac_change_interface,
	.config = iwl_legacy_mac_config,
	.configure_filter = iwl4965_configure_filter,
	.set_key = iwl4965_mac_set_key,
	.update_tkip_key = iwl4965_mac_update_tkip_key,
	.conf_tx = iwl_legacy_mac_conf_tx,
	.reset_tsf = iwl_legacy_mac_reset_tsf,
	.bss_info_changed = iwl_legacy_mac_bss_info_changed,
	.ampdu_action = iwl4965_mac_ampdu_action,
	.hw_scan = iwl_legacy_mac_hw_scan,
	.sta_add = iwl4965_mac_sta_add,
	.sta_remove = iwl_legacy_mac_sta_remove,
	.channel_switch = iwl4965_mac_channel_switch,
	.tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
};
2135
/* Aggregate ops bundle referenced by the 4965 device config (iwl4965_cfg). */
static const struct iwl_ops iwl4965_ops = {
	.lib = &iwl4965_lib,
	.hcmd = &iwl4965_hcmd,
	.utils = &iwl4965_hcmd_utils,
	.led = &iwl4965_led_ops,
	.legacy = &iwl4965_legacy_ops,
	.ieee80211_ops = &iwl4965_hw_ops,
};
2144
/* Hardware parameters common to all 4965 SKUs. */
static struct iwl_base_params iwl4965_base_params = {
	.eeprom_size = IWL4965_EEPROM_IMG_SIZE,
	.num_of_queues = IWL49_NUM_QUEUES,
	.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
	.pll_cfg_val = 0,
	.set_l0s = true,
	/* uCode is loaded through the bootstrap state machine */
	.use_bsm = true,
	.led_compensation = 61,
	.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
	.wd_timeout = IWL_DEF_WD_TIMEOUT,
	/* 4965 reports temperature in Kelvin (newer devices use Celsius) */
	.temperature_kelvin = true,
	.ucode_tracing = true,
	/* Sensitivity/chain-noise calibrations run in the driver, not uCode */
	.sensitivity_calib_by_driver = true,
	.chain_noise_calib_by_driver = true,
};
2160
/* Device configuration matched against the PCI ID table at probe time. */
struct iwl_cfg iwl4965_cfg = {
	.name = "Intel(R) Wireless WiFi Link 4965AGN",
	.fw_name_pre = IWL4965_FW_PRE,
	.ucode_api_max = IWL4965_UCODE_API_MAX,
	.ucode_api_min = IWL4965_UCODE_API_MIN,
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	/* Two Tx chains (A, B), three Rx chains (A, B, C) */
	.valid_tx_ant = ANT_AB,
	.valid_rx_ant = ANT_ABC,
	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
	.ops = &iwl4965_ops,
	.mod_params = &iwl4965_mod_params,
	.base_params = &iwl4965_base_params,
	.led_mode = IWL_LED_BLINK,
	/*
	 * Force use of chains B and C for scan RX on 5 GHz band
	 * because the device has off-channel reception on chain A.
	 */
	.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};
2181
/* Module firmware: advertise the highest supported 4965 uCode API image
 * so userspace tooling (e.g. initramfs builders) bundles it. */
MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
deleted file mode 100644
index 01f8163daf16..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.h
+++ /dev/null
@@ -1,282 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_4965_h__
64#define __iwl_4965_h__
65
66#include "iwl-dev.h"
67
68/* configuration for the _4965 devices */
69extern struct iwl_cfg iwl4965_cfg;
70
71extern struct iwl_mod_params iwl4965_mod_params;
72
73extern struct ieee80211_ops iwl4965_hw_ops;
74
75/* tx queue */
76void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
77 int sta_id, int tid, int freed);
78
79/* RXON */
80void iwl4965_set_rxon_chain(struct iwl_priv *priv,
81 struct iwl_rxon_context *ctx);
82
83/* uCode */
84int iwl4965_verify_ucode(struct iwl_priv *priv);
85
86/* lib */
87void iwl4965_check_abort_status(struct iwl_priv *priv,
88 u8 frame_count, u32 status);
89
90void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
91int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
92int iwl4965_hw_nic_init(struct iwl_priv *priv);
93int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
94
95/* rx */
96void iwl4965_rx_queue_restock(struct iwl_priv *priv);
97void iwl4965_rx_replenish(struct iwl_priv *priv);
98void iwl4965_rx_replenish_now(struct iwl_priv *priv);
99void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
100int iwl4965_rxq_stop(struct iwl_priv *priv);
101int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
102void iwl4965_rx_reply_rx(struct iwl_priv *priv,
103 struct iwl_rx_mem_buffer *rxb);
104void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
105 struct iwl_rx_mem_buffer *rxb);
106void iwl4965_rx_handle(struct iwl_priv *priv);
107
108/* tx */
109void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
110int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq,
112 dma_addr_t addr, u16 len, u8 reset, u8 pad);
113int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
114 struct iwl_tx_queue *txq);
115void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
116 struct ieee80211_tx_info *info);
117int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
118int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
119 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
120int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
121 struct ieee80211_sta *sta, u16 tid);
122int iwl4965_txq_check_empty(struct iwl_priv *priv,
123 int sta_id, u8 tid, int txq_id);
124void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
125 struct iwl_rx_mem_buffer *rxb);
126int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
127void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
128int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
129void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
130void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
131void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
132
133/*
134 * Acquire priv->lock before calling this function !
135 */
136void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
137/**
138 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
139 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
140 * @scd_retry: (1) Indicates queue will be used in aggregation mode
141 *
142 * NOTE: Acquire priv->lock before calling this function !
143 */
144void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
145 struct iwl_tx_queue *txq,
146 int tx_fifo_id, int scd_retry);
147
148static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
149{
150 status &= TX_STATUS_MSK;
151
152 switch (status) {
153 case TX_STATUS_SUCCESS:
154 case TX_STATUS_DIRECT_DONE:
155 return IEEE80211_TX_STAT_ACK;
156 case TX_STATUS_FAIL_DEST_PS:
157 return IEEE80211_TX_STAT_TX_FILTERED;
158 default:
159 return 0;
160 }
161}
162
163static inline bool iwl4965_is_tx_success(u32 status)
164{
165 status &= TX_STATUS_MSK;
166 return (status == TX_STATUS_SUCCESS) ||
167 (status == TX_STATUS_DIRECT_DONE);
168}
169
170u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
171
172/* rx */
173void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
174 struct iwl_rx_mem_buffer *rxb);
175bool iwl4965_good_plcp_health(struct iwl_priv *priv,
176 struct iwl_rx_packet *pkt);
177void iwl4965_rx_statistics(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb);
179void iwl4965_reply_statistics(struct iwl_priv *priv,
180 struct iwl_rx_mem_buffer *rxb);
181
182/* scan */
183int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
184
185/* station mgmt */
186int iwl4965_manage_ibss_station(struct iwl_priv *priv,
187 struct ieee80211_vif *vif, bool add);
188
189/* hcmd */
190int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
191
/*
 * Presumably maps a TX failure status to a printable reason string (the
 * debug build implementation lives in the .c file).  Without debugging,
 * an inline stub returns "" so callers can log unconditionally.
 */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
const char *iwl4965_get_tx_fail_reason(u32 status);
#else
static inline const char *
iwl4965_get_tx_fail_reason(u32 status) { return ""; }
#endif
198
199/* station management */
200int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
201 struct iwl_rxon_context *ctx);
202int iwl4965_add_bssid_station(struct iwl_priv *priv,
203 struct iwl_rxon_context *ctx,
204 const u8 *addr, u8 *sta_id_r);
205int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
206 struct iwl_rxon_context *ctx,
207 struct ieee80211_key_conf *key);
208int iwl4965_set_default_wep_key(struct iwl_priv *priv,
209 struct iwl_rxon_context *ctx,
210 struct ieee80211_key_conf *key);
211int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
212 struct iwl_rxon_context *ctx);
213int iwl4965_set_dynamic_key(struct iwl_priv *priv,
214 struct iwl_rxon_context *ctx,
215 struct ieee80211_key_conf *key, u8 sta_id);
216int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
217 struct iwl_rxon_context *ctx,
218 struct ieee80211_key_conf *key, u8 sta_id);
219void iwl4965_update_tkip_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf,
222 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
223int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
224 int sta_id, int tid);
225int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
226 int tid, u16 ssn);
227int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
228 int tid);
229void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
230 int sta_id, int cnt);
231int iwl4965_update_bcast_stations(struct iwl_priv *priv);
232
233/* rate */
234static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
235{
236 return BIT(ant_idx) << RATE_MCS_ANT_POS;
237}
238
239static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
240{
241 return le32_to_cpu(rate_n_flags) & 0xFF;
242}
243
244static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
245{
246 return cpu_to_le32(flags|(u32)rate);
247}
248
249/* eeprom */
250void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
251int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
252void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
253int iwl4965_eeprom_check_version(struct iwl_priv *priv);
254
255/* mac80211 handlers (for 4965) */
256void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
257int iwl4965_mac_start(struct ieee80211_hw *hw);
258void iwl4965_mac_stop(struct ieee80211_hw *hw);
259void iwl4965_configure_filter(struct ieee80211_hw *hw,
260 unsigned int changed_flags,
261 unsigned int *total_flags,
262 u64 multicast);
263int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
264 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
265 struct ieee80211_key_conf *key);
266void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
267 struct ieee80211_vif *vif,
268 struct ieee80211_key_conf *keyconf,
269 struct ieee80211_sta *sta,
270 u32 iv32, u16 *phase1key);
271int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
272 struct ieee80211_vif *vif,
273 enum ieee80211_ampdu_mlme_action action,
274 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
275 u8 buf_size);
276int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
277 struct ieee80211_vif *vif,
278 struct ieee80211_sta *sta);
279void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
280 struct ieee80211_channel_switch *ch_switch);
281
282#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
deleted file mode 100644
index 2bd5659310d7..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ /dev/null
@@ -1,2661 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <net/mac80211.h>
35
36#include "iwl-eeprom.h"
37#include "iwl-dev.h"
38#include "iwl-debug.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-power.h"
42#include "iwl-sta.h"
43#include "iwl-helpers.h"
44
45
46MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
47MODULE_VERSION(IWLWIFI_VERSION);
48MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49MODULE_LICENSE("GPL");
50
51/*
52 * set bt_coex_active to true, uCode will do kill/defer
53 * every time the priority line is asserted (BT is sending signals on the
54 * priority line in the PCIx).
55 * set bt_coex_active to false, uCode will ignore the BT activity and
56 * perform the normal operation
57 *
58 * User might experience transmit issue on some platform due to WiFi/BT
59 * co-exist problem. The possible behaviors are:
60 * Able to scan and finding all the available AP
61 * Not able to associate with any AP
62 * On those platforms, WiFi communication can be restored by set
63 * "bt_coex_active" module parameter to "false"
64 *
65 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */
/* Default: let uCode kill/defer on BT priority assertion (see comment above). */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");

/* Debug level bitmask, exported for use by the 3945/4965 driver modules. */
u32 iwlegacy_debug_level;
EXPORT_SYMBOL(iwlegacy_debug_level);

/* All-ones (broadcast) MAC address shared by both legacy drivers. */
const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(iwlegacy_bcast_addr);
76
77
78/* This function both allocates and initializes hw and priv. */
79struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
80{
81 struct iwl_priv *priv;
82 /* mac80211 allocates memory for this device instance, including
83 * space for this driver's private structure */
84 struct ieee80211_hw *hw;
85
86 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
87 cfg->ops->ieee80211_ops);
88 if (hw == NULL) {
89 pr_err("%s: Can not allocate network device\n",
90 cfg->name);
91 goto out;
92 }
93
94 priv = hw->priv;
95 priv->hw = hw;
96
97out:
98 return hw;
99}
100EXPORT_SYMBOL(iwl_legacy_alloc_all);
101
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
/*
 * Fill in mac80211's HT capability structure for one band from the
 * device's hw_params: supported widths, short GI, A-MSDU size, AMPDU
 * parameters and the per-chain Rx/Tx MCS masks.
 */
static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
			       struct ieee80211_sta_ht_cap *ht_info,
			       enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = priv->hw_params.rx_chains_num;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	/* 40 MHz support is advertised per band via the ht40_channel bitmap */
	if (priv->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* bit 0 of rx_mask[4] — presumably MCS 32 (40 MHz duplicate); confirm vs 802.11n MCS numbering */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* one full MCS byte (MCS 0-7) per available receive chain */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		/* Tx differs from Rx: report actual max Tx spatial streams */
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
151
152/**
153 * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based from eeprom
154 */
155int iwl_legacy_init_geos(struct iwl_priv *priv)
156{
157 struct iwl_channel_info *ch;
158 struct ieee80211_supported_band *sband;
159 struct ieee80211_channel *channels;
160 struct ieee80211_channel *geo_ch;
161 struct ieee80211_rate *rates;
162 int i = 0;
163 s8 max_tx_power = 0;
164
165 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
166 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
167 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
168 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
169 return 0;
170 }
171
172 channels = kzalloc(sizeof(struct ieee80211_channel) *
173 priv->channel_count, GFP_KERNEL);
174 if (!channels)
175 return -ENOMEM;
176
177 rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
178 GFP_KERNEL);
179 if (!rates) {
180 kfree(channels);
181 return -ENOMEM;
182 }
183
184 /* 5.2GHz channels start after the 2.4GHz channels */
185 sband = &priv->bands[IEEE80211_BAND_5GHZ];
186 sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
187 /* just OFDM */
188 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
189 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
190
191 if (priv->cfg->sku & IWL_SKU_N)
192 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
193 IEEE80211_BAND_5GHZ);
194
195 sband = &priv->bands[IEEE80211_BAND_2GHZ];
196 sband->channels = channels;
197 /* OFDM & CCK */
198 sband->bitrates = rates;
199 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
200
201 if (priv->cfg->sku & IWL_SKU_N)
202 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
203 IEEE80211_BAND_2GHZ);
204
205 priv->ieee_channels = channels;
206 priv->ieee_rates = rates;
207
208 for (i = 0; i < priv->channel_count; i++) {
209 ch = &priv->channel_info[i];
210
211 if (!iwl_legacy_is_channel_valid(ch))
212 continue;
213
214 sband = &priv->bands[ch->band];
215
216 geo_ch = &sband->channels[sband->n_channels++];
217
218 geo_ch->center_freq =
219 ieee80211_channel_to_frequency(ch->channel, ch->band);
220 geo_ch->max_power = ch->max_power_avg;
221 geo_ch->max_antenna_gain = 0xff;
222 geo_ch->hw_value = ch->channel;
223
224 if (iwl_legacy_is_channel_valid(ch)) {
225 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
226 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
227
228 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
229 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
230
231 if (ch->flags & EEPROM_CHANNEL_RADAR)
232 geo_ch->flags |= IEEE80211_CHAN_RADAR;
233
234 geo_ch->flags |= ch->ht40_extension_channel;
235
236 if (ch->max_power_avg > max_tx_power)
237 max_tx_power = ch->max_power_avg;
238 } else {
239 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
240 }
241
242 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
243 ch->channel, geo_ch->center_freq,
244 iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
245 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
246 "restricted" : "valid",
247 geo_ch->flags);
248 }
249
250 priv->tx_power_device_lmt = max_tx_power;
251 priv->tx_power_user_lmt = max_tx_power;
252 priv->tx_power_next = max_tx_power;
253
254 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
255 priv->cfg->sku & IWL_SKU_A) {
256 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
257 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
258 priv->pci_dev->device,
259 priv->pci_dev->subsystem_device);
260 priv->cfg->sku &= ~IWL_SKU_A;
261 }
262
263 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
264 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
265 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
266
267 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
268
269 return 0;
270}
271EXPORT_SYMBOL(iwl_legacy_init_geos);
272
273/*
274 * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
275 */
276void iwl_legacy_free_geos(struct iwl_priv *priv)
277{
278 kfree(priv->ieee_channels);
279 kfree(priv->ieee_rates);
280 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
281}
282EXPORT_SYMBOL(iwl_legacy_free_geos);
283
284static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
285 enum ieee80211_band band,
286 u16 channel, u8 extension_chan_offset)
287{
288 const struct iwl_channel_info *ch_info;
289
290 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
291 if (!iwl_legacy_is_channel_valid(ch_info))
292 return false;
293
294 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
295 return !(ch_info->ht40_extension_channel &
296 IEEE80211_CHAN_NO_HT40PLUS);
297 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
298 return !(ch_info->ht40_extension_channel &
299 IEEE80211_CHAN_NO_HT40MINUS);
300
301 return false;
302}
303
304bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
305 struct iwl_rxon_context *ctx,
306 struct ieee80211_sta_ht_cap *ht_cap)
307{
308 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
309 return false;
310
311 /*
312 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
313 * the bit will not set if it is pure 40MHz case
314 */
315 if (ht_cap && !ht_cap->ht_supported)
316 return false;
317
318#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
319 if (priv->disable_ht40)
320 return false;
321#endif
322
323 return iwl_legacy_is_channel_extension(priv, priv->band,
324 le16_to_cpu(ctx->staging.channel),
325 ctx->ht.extension_chan_offset);
326}
327EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
328
329static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
330{
331 u16 new_val;
332 u16 beacon_factor;
333
334 /*
335 * If mac80211 hasn't given us a beacon interval, program
336 * the default into the device.
337 */
338 if (!beacon_val)
339 return DEFAULT_BEACON_INTERVAL;
340
341 /*
342 * If the beacon interval we obtained from the peer
343 * is too large, we'll have to wake up more often
344 * (and in IBSS case, we'll beacon too much)
345 *
346 * For example, if max_beacon_val is 4096, and the
347 * requested beacon interval is 7000, we'll have to
348 * use 3500 to be able to wake up on the beacons.
349 *
350 * This could badly influence beacon detection stats.
351 */
352
353 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
354 new_val = beacon_val / beacon_factor;
355
356 if (!new_val)
357 new_val = max_beacon_val;
358
359 return new_val;
360}
361
/*
 * Build and send the RXON timing command for @ctx: timestamp, listen
 * interval, (clamped) beacon interval, initial beacon countdown and
 * DTIM period.  Caller must hold priv->mutex.  Returns the status of
 * iwl_legacy_send_cmd_pdu().
 */
int
iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	/* no vif yet (e.g. before association): let the clamp pick a default */
	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
			priv->hw_params.max_beacon_itrvl * TIME_UNIT);
	ctx->timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	/* time from now until the next beacon boundary */
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* a DTIM period of 0 is invalid: fall back to every beacon */
	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				sizeof(ctx->timing), &ctx->timing);
}
EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
409
410void
411iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
412 struct iwl_rxon_context *ctx,
413 int hw_decrypt)
414{
415 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
416
417 if (hw_decrypt)
418 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
419 else
420 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
421
422}
423EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
424
425/* validate RXON structure is valid */
426int
427iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
428{
429 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
430 bool error = false;
431
432 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
433 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
434 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
435 error = true;
436 }
437 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
438 IWL_WARN(priv, "check 2.4G: wrong radar\n");
439 error = true;
440 }
441 } else {
442 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
443 IWL_WARN(priv, "check 5.2G: not short slot!\n");
444 error = true;
445 }
446 if (rxon->flags & RXON_FLG_CCK_MSK) {
447 IWL_WARN(priv, "check 5.2G: CCK!\n");
448 error = true;
449 }
450 }
451 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
452 IWL_WARN(priv, "mac/bssid mcast!\n");
453 error = true;
454 }
455
456 /* make sure basic rates 6Mbps and 1Mbps are supported */
457 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
458 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
459 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
460 error = true;
461 }
462
463 if (le16_to_cpu(rxon->assoc_id) > 2007) {
464 IWL_WARN(priv, "aid > 2007\n");
465 error = true;
466 }
467
468 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
469 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
470 IWL_WARN(priv, "CCK and short slot\n");
471 error = true;
472 }
473
474 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
475 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
476 IWL_WARN(priv, "CCK and auto detect");
477 error = true;
478 }
479
480 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
481 RXON_FLG_TGG_PROTECT_MSK)) ==
482 RXON_FLG_TGG_PROTECT_MSK) {
483 IWL_WARN(priv, "TGg but no auto-detect\n");
484 error = true;
485 }
486
487 if (error)
488 IWL_WARN(priv, "Tuning to channel %d\n",
489 le16_to_cpu(rxon->channel));
490
491 if (error) {
492 IWL_ERR(priv, "Invalid RXON\n");
493 return -EINVAL;
494 }
495 return 0;
496}
497EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
498
499/**
500 * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
501 * @priv: staging_rxon is compared to active_rxon
502 *
503 * If the RXON structure is changing enough to require a new tune,
504 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
505 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
506 */
507int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
508 struct iwl_rxon_context *ctx)
509{
510 const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
511 const struct iwl_legacy_rxon_cmd *active = &ctx->active;
512
513#define CHK(cond) \
514 if ((cond)) { \
515 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
516 return 1; \
517 }
518
519#define CHK_NEQ(c1, c2) \
520 if ((c1) != (c2)) { \
521 IWL_DEBUG_INFO(priv, "need full RXON - " \
522 #c1 " != " #c2 " - %d != %d\n", \
523 (c1), (c2)); \
524 return 1; \
525 }
526
527 /* These items are only settable from the full RXON command */
528 CHK(!iwl_legacy_is_associated_ctx(ctx));
529 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
530 CHK(compare_ether_addr(staging->node_addr, active->node_addr));
531 CHK(compare_ether_addr(staging->wlap_bssid_addr,
532 active->wlap_bssid_addr));
533 CHK_NEQ(staging->dev_type, active->dev_type);
534 CHK_NEQ(staging->channel, active->channel);
535 CHK_NEQ(staging->air_propagation, active->air_propagation);
536 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
537 active->ofdm_ht_single_stream_basic_rates);
538 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
539 active->ofdm_ht_dual_stream_basic_rates);
540 CHK_NEQ(staging->assoc_id, active->assoc_id);
541
542 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
543 * be updated with the RXON_ASSOC command -- however only some
544 * flag transitions are allowed using RXON_ASSOC */
545
546 /* Check if we are not switching bands */
547 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
548 active->flags & RXON_FLG_BAND_24G_MSK);
549
550 /* Check if we are switching association toggle */
551 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
552 active->filter_flags & RXON_FILTER_ASSOC_MSK);
553
554#undef CHK
555#undef CHK_NEQ
556
557 return 0;
558}
559EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
560
561u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
562 struct iwl_rxon_context *ctx)
563{
564 /*
565 * Assign the lowest rate -- should really get this from
566 * the beacon skb from mac80211.
567 */
568 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
569 return IWL_RATE_1M_PLCP;
570 else
571 return IWL_RATE_6M_PLCP;
572}
573EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
574
/*
 * Apply the context's HT state to its staging RXON: protection mode,
 * channel width (20, 20/40 mixed, or pure 40) and control-channel
 * location, then refresh the RX chain selection.  Only ctx->staging is
 * modified; nothing is sent to the device here.
 */
static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
			struct iwl_ht_config *ht_conf,
			struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	/* HT disabled: strip every HT-related RXON flag and stop */
	if (!ctx->ht.enabled) {
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
					RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection ==
				IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IWL_ERR(priv,
					"invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}
646
/* Apply the HT configuration to the staging RXON of every active context. */
void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
	struct iwl_rxon_context *ctx;

	for_each_context(priv, ctx)
		_iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
}
EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
655
/* Return valid, unused, channel for a passive scan to reset the RF */
u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
					 enum ieee80211_band band)
{
	const struct iwl_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;
	struct iwl_rxon_context *ctx;

	/* Pick the channel_info[] index window for the requested band.
	 * Assumes the 2.4 GHz channels occupy the first 14 entries and
	 * the 5 GHz entries follow — table layout built elsewhere. */
	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = priv->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		bool busy = false;

		/* Skip any channel currently staged by some context. */
		for_each_context(priv, ctx) {
			busy = priv->channel_info[i].channel ==
				le16_to_cpu(ctx->staging.channel);
			if (busy)
				break;
		}

		if (busy)
			continue;

		/* First unused channel that is valid for this band wins. */
		channel = priv->channel_info[i].channel;
		ch_info = iwl_legacy_get_channel_info(priv, band, channel);
		if (iwl_legacy_is_channel_valid(ch_info))
			break;
	}

	/* 0 means no usable channel was found. */
	return channel;
}
EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
696
697/**
698 * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
699 * @ch: requested channel as a pointer to struct ieee80211_channel
700
701 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
702 * in the staging RXON flag structure based on the ch->band
703 */
704int
705iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
706 struct iwl_rxon_context *ctx)
707{
708 enum ieee80211_band band = ch->band;
709 u16 channel = ch->hw_value;
710
711 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
712 (priv->band == band))
713 return 0;
714
715 ctx->staging.channel = cpu_to_le16(channel);
716 if (band == IEEE80211_BAND_5GHZ)
717 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
718 else
719 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
720
721 priv->band = band;
722
723 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
724
725 return 0;
726}
727EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
728
729void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
730 struct iwl_rxon_context *ctx,
731 enum ieee80211_band band,
732 struct ieee80211_vif *vif)
733{
734 if (band == IEEE80211_BAND_5GHZ) {
735 ctx->staging.flags &=
736 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
737 | RXON_FLG_CCK_MSK);
738 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
739 } else {
740 /* Copied from iwl_post_associate() */
741 if (vif && vif->bss_conf.use_short_slot)
742 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
743 else
744 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
745
746 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
747 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
748 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
749 }
750}
751EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
752
753/*
754 * initialize rxon structure with default values from eeprom
755 */
756void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
757 struct iwl_rxon_context *ctx)
758{
759 const struct iwl_channel_info *ch_info;
760
761 memset(&ctx->staging, 0, sizeof(ctx->staging));
762
763 if (!ctx->vif) {
764 ctx->staging.dev_type = ctx->unused_devtype;
765 } else
766 switch (ctx->vif->type) {
767
768 case NL80211_IFTYPE_STATION:
769 ctx->staging.dev_type = ctx->station_devtype;
770 ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
771 break;
772
773 case NL80211_IFTYPE_ADHOC:
774 ctx->staging.dev_type = ctx->ibss_devtype;
775 ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
776 ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
777 RXON_FILTER_ACCEPT_GRP_MSK;
778 break;
779
780 default:
781 IWL_ERR(priv, "Unsupported interface type %d\n",
782 ctx->vif->type);
783 break;
784 }
785
786#if 0
787 /* TODO: Figure out when short_preamble would be set and cache from
788 * that */
789 if (!hw_to_local(priv->hw)->short_preamble)
790 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
791 else
792 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
793#endif
794
795 ch_info = iwl_legacy_get_channel_info(priv, priv->band,
796 le16_to_cpu(ctx->active.channel));
797
798 if (!ch_info)
799 ch_info = &priv->channel_info[0];
800
801 ctx->staging.channel = cpu_to_le16(ch_info->channel);
802 priv->band = ch_info->band;
803
804 iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
805
806 ctx->staging.ofdm_basic_rates =
807 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
808 ctx->staging.cck_basic_rates =
809 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
810
811 /* clear both MIX and PURE40 mode flag */
812 ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
813 RXON_FLG_CHANNEL_MODE_PURE_40);
814 if (ctx->vif)
815 memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
816
817 ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
818 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
819}
820EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
821
822void iwl_legacy_set_rate(struct iwl_priv *priv)
823{
824 const struct ieee80211_supported_band *hw = NULL;
825 struct ieee80211_rate *rate;
826 struct iwl_rxon_context *ctx;
827 int i;
828
829 hw = iwl_get_hw_mode(priv, priv->band);
830 if (!hw) {
831 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
832 return;
833 }
834
835 priv->active_rate = 0;
836
837 for (i = 0; i < hw->n_bitrates; i++) {
838 rate = &(hw->bitrates[i]);
839 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
840 priv->active_rate |= (1 << rate->hw_value);
841 }
842
843 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
844
845 for_each_context(priv, ctx) {
846 ctx->staging.cck_basic_rates =
847 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
848
849 ctx->staging.ofdm_basic_rates =
850 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
851 }
852}
853EXPORT_SYMBOL(iwl_legacy_set_rate);
854
855void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
856{
857 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
858
859 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
860 return;
861
862 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
863 ieee80211_chswitch_done(ctx->vif, is_success);
864}
865EXPORT_SYMBOL(iwl_legacy_chswitch_done);
866
/*
 * Handle a channel-switch-announcement notification from the firmware:
 * on success, record the new channel in both the active and staging
 * RXON of the BSS context and complete the pending switch.
 */
void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	/* Note: this deliberately writes the *active* RXON, not staging. */
	struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;

	/* Ignore stray notifications when no switch is in flight. */
	if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		return;

	/* Success requires status == 0 and the channel we asked for. */
	if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
		rxon->channel = csa->channel;
		ctx->staging.channel = csa->channel;
		IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
			      le16_to_cpu(csa->channel));
		iwl_legacy_chswitch_done(priv, true);
	} else {
		IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
			le16_to_cpu(csa->channel));
		iwl_legacy_chswitch_done(priv, false);
	}
}
EXPORT_SYMBOL(iwl_legacy_rx_csa);
891
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
 * Dump the context's *staging* RXON command field-by-field to the
 * debug log (plus a raw hex dump).  Debug builds only.
 */
void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
			le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
			rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
			le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
#endif
917/**
918 * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
919 */
920void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
921{
922 /* Set the FW error flag -- cleared on iwl_down */
923 set_bit(STATUS_FW_ERROR, &priv->status);
924
925 /* Cancel currently queued command. */
926 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
927
928 IWL_ERR(priv, "Loaded firmware version: %s\n",
929 priv->hw->wiphy->fw_version);
930
931 priv->cfg->ops->lib->dump_nic_error_log(priv);
932 if (priv->cfg->ops->lib->dump_fh)
933 priv->cfg->ops->lib->dump_fh(priv, NULL, false);
934#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
935 if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
936 iwl_legacy_print_rx_config_cmd(priv,
937 &priv->contexts[IWL_RXON_CTX_BSS]);
938#endif
939
940 wake_up(&priv->wait_command_queue);
941
942 /* Keep the restart process from trying to send host
943 * commands by clearing the INIT status bit */
944 clear_bit(STATUS_READY, &priv->status);
945
946 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
947 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
948 "Restarting adapter due to uCode error.\n");
949
950 if (priv->cfg->mod_params->restart_fw)
951 queue_work(priv->workqueue, &priv->restart);
952 }
953}
954EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
955
/*
 * Request the device to stop bus-master DMA and poll (up to 100 us)
 * for the hardware to acknowledge.  Returns 0 on success or the
 * iwl_poll_bit() error on timeout (logged as a warning, not fatal).
 */
static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(priv, "stop master\n");

	return ret;
}
972
/*
 * Power down the NIC: stop DMA, issue a software reset, then drop the
 * "init done" bit so the adapter leaves the powered-up active state.
 * The register sequence and the 10 us settle delay are order-critical.
 */
void iwl_legacy_apm_stop(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	iwl_legacy_apm_stop_master(priv);

	/* Reset the entire device */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(iwl_legacy_apm_stop);
993
994
995/*
996 * Start up NIC's basic functionality after it has been reset
997 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
998 * NOTE: This does not load uCode nor start the embedded processor
999 */
1000int iwl_legacy_apm_init(struct iwl_priv *priv)
1001{
1002 int ret = 0;
1003 u16 lctl;
1004
1005 IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
1006
1007 /*
1008 * Use "set_bit" below rather than "write", to preserve any hardware
1009 * bits already set by default after reset.
1010 */
1011
1012 /* Disable L0S exit timer (platform NMI Work/Around) */
1013 iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1014 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1015
1016 /*
1017 * Disable L0s without affecting L1;
1018 * don't wait for ICH L0s (ICH bug W/A)
1019 */
1020 iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1021 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1022
1023 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1024 iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
1025 CSR_DBG_HPET_MEM_REG_VAL);
1026
1027 /*
1028 * Enable HAP INTA (interrupt from management bus) to
1029 * wake device's PCI Express link L1a -> L0s
1030 * NOTE: This is no-op for 3945 (non-existent bit)
1031 */
1032 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1033 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1034
1035 /*
1036 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1037 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1038 * If so (likely), disable L0S, so device moves directly L0->L1;
1039 * costs negligible amount of power savings.
1040 * If not (unlikely), enable L0S, so there is at least some
1041 * power savings, even without L1.
1042 */
1043 if (priv->cfg->base_params->set_l0s) {
1044 lctl = iwl_legacy_pcie_link_ctl(priv);
1045 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1046 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
1047 /* L1-ASPM enabled; disable(!) L0S */
1048 iwl_legacy_set_bit(priv, CSR_GIO_REG,
1049 CSR_GIO_REG_VAL_L0S_ENABLED);
1050 IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
1051 } else {
1052 /* L1-ASPM disabled; enable(!) L0S */
1053 iwl_legacy_clear_bit(priv, CSR_GIO_REG,
1054 CSR_GIO_REG_VAL_L0S_ENABLED);
1055 IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1056 }
1057 }
1058
1059 /* Configure analog phase-lock-loop before activating to D0A */
1060 if (priv->cfg->base_params->pll_cfg_val)
1061 iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
1062 priv->cfg->base_params->pll_cfg_val);
1063
1064 /*
1065 * Set "initialization complete" bit to move adapter from
1066 * D0U* --> D0A* (powered-up active) state.
1067 */
1068 iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1069
1070 /*
1071 * Wait for clock stabilization; once stabilized, access to
1072 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
1073 * and accesses to uCode SRAM.
1074 */
1075 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1076 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1077 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1078 if (ret < 0) {
1079 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1080 goto out;
1081 }
1082
1083 /*
1084 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1085 * BSM (Boostrap State Machine) is only in 3945 and 4965.
1086 *
1087 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1088 * do not disable clocks. This preserves any hardware bits already
1089 * set by default in "CLK_CTRL_REG" after reset.
1090 */
1091 if (priv->cfg->base_params->use_bsm)
1092 iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
1093 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1094 else
1095 iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
1096 APMG_CLK_VAL_DMA_CLK_RQT);
1097 udelay(20);
1098
1099 /* Disable L1-Active */
1100 iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1101 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1102
1103out:
1104 return ret;
1105}
1106EXPORT_SYMBOL(iwl_legacy_apm_init);
1107
1108
/*
 * Set the user TX power limit (dBm), validating it against the device
 * limit and sending it to the firmware via the per-device callback.
 *
 * @force: apply even if unchanged and even while scanning/uncommitted.
 * Returns 0 on success or when the request is deferred; negative errno
 * on invalid input, missing callback, RF not ready, or send failure.
 * Caller must hold priv->mutex.
 */
int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!priv->cfg->ops->lib->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm mean 1 milliwatt */
	if (tx_power < 0) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below 1 mW.\n",
			 tx_power);
		return -EINVAL;
	}

	if (tx_power > priv->tx_power_device_lmt) {
		IWL_WARN(priv,
			"Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!iwl_legacy_is_ready_rf(priv))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always need to be updated for newest request */
	priv->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = priv->cfg->ops->lib->send_tx_power(priv);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_set_tx_power);
1167
1168void iwl_legacy_send_bt_config(struct iwl_priv *priv)
1169{
1170 struct iwl_bt_cmd bt_cmd = {
1171 .lead_time = BT_LEAD_TIME_DEF,
1172 .max_kill = BT_MAX_KILL_DEF,
1173 .kill_ack_mask = 0,
1174 .kill_cts_mask = 0,
1175 };
1176
1177 if (!bt_coex_active)
1178 bt_cmd.flags = BT_COEX_DISABLE;
1179 else
1180 bt_cmd.flags = BT_COEX_ENABLE;
1181
1182 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1183 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1184
1185 if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1186 sizeof(struct iwl_bt_cmd), &bt_cmd))
1187 IWL_ERR(priv, "failed to send BT Coex Config\n");
1188}
1189EXPORT_SYMBOL(iwl_legacy_send_bt_config);
1190
1191int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1192{
1193 struct iwl_statistics_cmd statistics_cmd = {
1194 .configuration_flags =
1195 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1196 };
1197
1198 if (flags & CMD_ASYNC)
1199 return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1200 sizeof(struct iwl_statistics_cmd),
1201 &statistics_cmd, NULL);
1202 else
1203 return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1204 sizeof(struct iwl_statistics_cmd),
1205 &statistics_cmd);
1206}
1207EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
1208
/*
 * Handle a PM sleep notification from the firmware.  Debug-only: the
 * notification is merely logged; nothing else is done with it.
 */
void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
			   struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
1220
/*
 * Hex-dump an otherwise unhandled PM debug-statistics notification.
 * The payload length is taken from the packet header's size field.
 */
void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
			"notification for %s:\n", len,
			iwl_legacy_get_cmd_string(pkt->hdr.cmd));
	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
}
EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
1232
/*
 * Log an error reply received from the firmware: the failing command,
 * its sequence number, and the firmware-supplied error details.
 */
void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
			struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
1247
/* Reset all accumulated interrupt-service statistics to zero. */
void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
{
	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}
1252
/*
 * mac80211 conf_tx callback: store per-AC EDCA parameters (cw_min,
 * cw_max, AIFS, TXOP) into every context's default QoS table.
 * Returns 0 on success (including silently ignored out-of-range
 * queues), -EIO if the RF is not ready.
 */
int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif, u16 queue,
			   const struct ieee80211_tx_queue_params *params)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx;
	unsigned long flags;
	int q;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	/* Map mac80211 queue index to device AC index (reversed order). */
	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&priv->lock, flags);

	for_each_context(priv, ctx) {
		ctx->qos_data.def_qos_parm.ac[q].cw_min =
			cpu_to_le16(params->cw_min);
		ctx->qos_data.def_qos_parm.ac[q].cw_max =
			cpu_to_le16(params->cw_max);
		ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
		/* TXOP is given in 32 us units by the firmware interface. */
		ctx->qos_data.def_qos_parm.ac[q].edca_txop =
				cpu_to_le16((params->txop * 32));

		ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
1296
1297int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
1298{
1299 struct iwl_priv *priv = hw->priv;
1300
1301 return priv->ibss_manager == IWL_IBSS_MANAGER;
1302}
1303EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
1304
/*
 * Rebuild the context's staging RXON from defaults for its current
 * interface type, refresh the RX chain config, and commit to firmware.
 */
static int
iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	iwl_legacy_connection_init_rx_config(priv, ctx);

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	return iwl_legacy_commit_rxon(priv, ctx);
}
1315
/*
 * Activate the context for its newly bound interface and program the
 * device accordingly.  On failure the context is deactivated again
 * (unless it is an always-active context).  Caller holds priv->mutex.
 */
static int iwl_legacy_setup_interface(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct ieee80211_vif *vif = ctx->vif;
	int err;

	lockdep_assert_held(&priv->mutex);

	/*
	 * This variable will be correct only when there's just
	 * a single context, but all code using it is for hardware
	 * that supports only one context.
	 */
	priv->iw_mode = vif->type;

	ctx->is_active = true;

	err = iwl_legacy_set_mode(priv, ctx);
	if (err) {
		if (!ctx->always_active)
			ctx->is_active = false;
		return err;
	}

	return 0;
}
1342
/*
 * mac80211 add_interface callback: find a free RXON context that
 * supports the requested interface type, bind the vif to it, and
 * program the device.  Returns -EINVAL if the RF is not ready or an
 * exclusive context is busy, -EOPNOTSUPP if no context matches.
 */
int
iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *tmp, *ctx = NULL;
	int err;

	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
			   vif->type, vif->addr);

	mutex_lock(&priv->mutex);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_WARN(priv, "Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	/* Search for an unused context that can host this vif type. */
	for_each_context(priv, tmp) {
		u32 possible_modes =
			tmp->interface_modes | tmp->exclusive_interface_modes;

		if (tmp->vif) {
			/* check if this busy context is exclusive */
			if (tmp->exclusive_interface_modes &
						BIT(tmp->vif->type)) {
				err = -EINVAL;
				goto out;
			}
			continue;
		}

		if (!(possible_modes & BIT(vif->type)))
			continue;

		/* have maybe usable context w/o interface */
		ctx = tmp;
		break;
	}

	if (!ctx) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = ctx;
	ctx->vif = vif;

	err = iwl_legacy_setup_interface(priv, ctx);
	if (!err)
		goto out;

	/* Setup failed: unbind the vif and fall back to station mode. */
	ctx->vif = NULL;
	priv->iw_mode = NL80211_IFTYPE_STATION;
 out:
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return err;
}
EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
1405
/*
 * Detach an interface from its context: abort any scan it owns and,
 * unless this is an interface-type change (@mode_change), reprogram
 * the context and deactivate it.  Caller holds priv->mutex.
 */
static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
				   struct ieee80211_vif *vif,
				   bool mode_change)
{
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&priv->mutex);

	/* If this vif initiated the active scan, cancel and end it. */
	if (priv->scan_vif == vif) {
		iwl_legacy_scan_cancel_timeout(priv, 200);
		iwl_legacy_force_scan_end(priv);
	}

	if (!mode_change) {
		iwl_legacy_set_mode(priv, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}
}
1425
/*
 * mac80211 remove_interface callback: unbind the vif from its context,
 * tear the interface down, and forget the cached BSSID.
 */
void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	WARN_ON(ctx->vif != vif);
	ctx->vif = NULL;

	iwl_legacy_teardown_interface(priv, vif, false);

	memset(priv->bssid, 0, ETH_ALEN);
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");

}
EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
1448
1449int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
1450{
1451 if (!priv->txq)
1452 priv->txq = kzalloc(
1453 sizeof(struct iwl_tx_queue) *
1454 priv->cfg->base_params->num_of_queues,
1455 GFP_KERNEL);
1456 if (!priv->txq) {
1457 IWL_ERR(priv, "Not enough memory for txq\n");
1458 return -ENOMEM;
1459 }
1460 return 0;
1461}
1462EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
1463
/* Free the TX queue array and clear the pointer (safe to call twice). */
void iwl_legacy_txq_mem(struct iwl_priv *priv)
{
	kfree(priv->txq);
	priv->txq = NULL;
}
EXPORT_SYMBOL(iwl_legacy_txq_mem);
1470
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS

/* Total size of each TX/RX traffic ring buffer, in bytes. */
#define IWL_TRAFFIC_DUMP_SIZE	(IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)

/*
 * Rewind both traffic-log ring buffers to the start and zero any
 * buffers that have been allocated.
 */
void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
{
	priv->tx_traffic_idx = 0;
	priv->rx_traffic_idx = 0;
	if (priv->tx_traffic)
		memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
	if (priv->rx_traffic)
		memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
}
/*
 * Allocate the TX and/or RX traffic-log buffers, but only for the
 * directions enabled in the debug level, then reset both logs.
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): if the RX allocation fails after the TX one succeeded,
 * the TX buffer is kept (pointer retained) — presumably the caller's
 * error path frees it via iwl_legacy_free_traffic_mem(); confirm.
 */
int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
{
	u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;

	if (iwlegacy_debug_level & IWL_DL_TX) {
		if (!priv->tx_traffic) {
			priv->tx_traffic =
				kzalloc(traffic_size, GFP_KERNEL);
			if (!priv->tx_traffic)
				return -ENOMEM;
		}
	}
	if (iwlegacy_debug_level & IWL_DL_RX) {
		if (!priv->rx_traffic) {
			priv->rx_traffic =
				kzalloc(traffic_size, GFP_KERNEL);
			if (!priv->rx_traffic)
				return -ENOMEM;
		}
	}
	iwl_legacy_reset_traffic_log(priv);
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
1509
/* Release both traffic-log buffers and clear the pointers. */
void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
{
	kfree(priv->tx_traffic);
	priv->tx_traffic = NULL;

	kfree(priv->rx_traffic);
	priv->rx_traffic = NULL;
}
EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
1519
/*
 * Record the start (up to IWL_TRAFFIC_ENTRY_SIZE bytes) of a
 * transmitted data frame into the TX traffic ring buffer.  No-op
 * unless TX debug logging is enabled and the buffer is allocated;
 * non-data frames are ignored.
 */
void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
		      u16 length, struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
		return;

	if (!priv->tx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		/* Truncate to one ring entry. */
		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((priv->tx_traffic +
		       (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
		       header, len);
		priv->tx_traffic_idx =
			(priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
1544
/*
 * RX counterpart of iwl_legacy_dbg_log_tx_data_frame(): record the
 * start of a received data frame into the RX traffic ring buffer.
 * No-op unless RX debug logging is enabled and the buffer exists.
 */
void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
		      u16 length, struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
		return;

	if (!priv->rx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		/* Truncate to one ring entry. */
		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((priv->rx_traffic +
		       (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
		       header, len);
		priv->rx_traffic_idx =
			(priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
1569
/*
 * Map a MANAGEMENT_* statistics index to its name for debug output.
 * IWL_CMD is defined elsewhere; it presumably expands to a case label
 * returning the stringified constant — confirm against its definition.
 */
const char *iwl_legacy_get_mgmt_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(MANAGEMENT_ASSOC_REQ);
		IWL_CMD(MANAGEMENT_ASSOC_RESP);
		IWL_CMD(MANAGEMENT_REASSOC_REQ);
		IWL_CMD(MANAGEMENT_REASSOC_RESP);
		IWL_CMD(MANAGEMENT_PROBE_REQ);
		IWL_CMD(MANAGEMENT_PROBE_RESP);
		IWL_CMD(MANAGEMENT_BEACON);
		IWL_CMD(MANAGEMENT_ATIM);
		IWL_CMD(MANAGEMENT_DISASSOC);
		IWL_CMD(MANAGEMENT_AUTH);
		IWL_CMD(MANAGEMENT_DEAUTH);
		IWL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";

	}
}
1590
/*
 * Map a CONTROL_* statistics index to its name for debug output;
 * "UNKNOWN" for anything unrecognized.
 */
const char *iwl_legacy_get_ctrl_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(CONTROL_BACK_REQ);
		IWL_CMD(CONTROL_BACK);
		IWL_CMD(CONTROL_PSPOLL);
		IWL_CMD(CONTROL_RTS);
		IWL_CMD(CONTROL_CTS);
		IWL_CMD(CONTROL_ACK);
		IWL_CMD(CONTROL_CFEND);
		IWL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";

	}
}
1607
1608void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
1609{
1610 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1611 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1612}
1613
1614/*
1615 * if CONFIG_IWLWIFI_LEGACY_DEBUGFS defined,
1616 * iwl_legacy_update_stats function will
1617 * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass
1618 * Use debugFs to display the rx/rx_statistics
1619 * if CONFIG_IWLWIFI_LEGACY_DEBUGFS not being defined, then no MGMT and CTRL
1620 * information will be recorded, but DATA pkt still will be recorded
1621 * for the reason of iwl_led.c need to control the led blinking based on
1622 * number of tx and rx data.
1623 *
1624 */
1625void
1626iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1627{
1628 struct traffic_stats *stats;
1629
1630 if (is_tx)
1631 stats = &priv->tx_stats;
1632 else
1633 stats = &priv->rx_stats;
1634
1635 if (ieee80211_is_mgmt(fc)) {
1636 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1637 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
1638 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
1639 break;
1640 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
1641 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
1642 break;
1643 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
1644 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
1645 break;
1646 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
1647 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
1648 break;
1649 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
1650 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
1651 break;
1652 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
1653 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
1654 break;
1655 case cpu_to_le16(IEEE80211_STYPE_BEACON):
1656 stats->mgmt[MANAGEMENT_BEACON]++;
1657 break;
1658 case cpu_to_le16(IEEE80211_STYPE_ATIM):
1659 stats->mgmt[MANAGEMENT_ATIM]++;
1660 break;
1661 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
1662 stats->mgmt[MANAGEMENT_DISASSOC]++;
1663 break;
1664 case cpu_to_le16(IEEE80211_STYPE_AUTH):
1665 stats->mgmt[MANAGEMENT_AUTH]++;
1666 break;
1667 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
1668 stats->mgmt[MANAGEMENT_DEAUTH]++;
1669 break;
1670 case cpu_to_le16(IEEE80211_STYPE_ACTION):
1671 stats->mgmt[MANAGEMENT_ACTION]++;
1672 break;
1673 }
1674 } else if (ieee80211_is_ctl(fc)) {
1675 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
1676 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
1677 stats->ctrl[CONTROL_BACK_REQ]++;
1678 break;
1679 case cpu_to_le16(IEEE80211_STYPE_BACK):
1680 stats->ctrl[CONTROL_BACK]++;
1681 break;
1682 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
1683 stats->ctrl[CONTROL_PSPOLL]++;
1684 break;
1685 case cpu_to_le16(IEEE80211_STYPE_RTS):
1686 stats->ctrl[CONTROL_RTS]++;
1687 break;
1688 case cpu_to_le16(IEEE80211_STYPE_CTS):
1689 stats->ctrl[CONTROL_CTS]++;
1690 break;
1691 case cpu_to_le16(IEEE80211_STYPE_ACK):
1692 stats->ctrl[CONTROL_ACK]++;
1693 break;
1694 case cpu_to_le16(IEEE80211_STYPE_CFEND):
1695 stats->ctrl[CONTROL_CFEND]++;
1696 break;
1697 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
1698 stats->ctrl[CONTROL_CFENDACK]++;
1699 break;
1700 }
1701 } else {
1702 /* data */
1703 stats->data_cnt++;
1704 stats->data_bytes += len;
1705 }
1706}
1707EXPORT_SYMBOL(iwl_legacy_update_stats);
1708#endif
1709
/*
 * Force a firmware reload.  Internal requests (@external == false) are
 * rate-limited by reset_duration and gated on the fw_restart module
 * parameter; external requests (e.g. from debugfs) always proceed.
 * Returns 0 on success (or allowed no-op), -EINVAL while shutting
 * down, -EAGAIN when rate-limited.
 */
int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
{
	struct iwl_force_reset *force_reset;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EINVAL;

	force_reset = &priv->force_reset;
	force_reset->reset_request_count++;
	if (!external) {
		/* Reject internal requests arriving within reset_duration
		 * of the previous accepted one. */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
		    force_reset->reset_duration, jiffies)) {
			IWL_DEBUG_INFO(priv, "force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	/* Counted as accepted even if the module parameter below turns
	 * the actual reload into a no-op. */
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;

	/*
	 * If the request comes from external (e.g. debugfs), always
	 * perform it regardless of the module parameter setting.
	 * If the request is internal (uCode error or driver-detected
	 * failure), the fw_restart module parameter must be checked
	 * before reloading the firmware.
	 */

	if (!external && !priv->cfg->mod_params->restart_fw) {
		IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
			       "module parameter setting\n");
		return 0;
	}

	IWL_ERR(priv, "On demand firmware reload\n");

	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);
	wake_up(&priv->wait_command_queue);
	/*
	 * Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit
	 */
	clear_bit(STATUS_READY, &priv->status);
	queue_work(priv->workqueue, &priv->restart);

	return 0;
}
1760
/*
 * mac80211 change_interface callback: switch @vif to @newtype/@newp2p in
 * place by tearing down and re-setting-up the interface on its current
 * RXON context, instead of a full remove_interface/add_interface cycle.
 *
 * Returns 0 on success (even if the device-side re-setup fails, see the
 * comment near the end), or -EBUSY when the context has no vif / RF is
 * not ready (e.g. mid firmware restart), the context does not support
 * the new type, or the new type is exclusive while another context is
 * active.
 */
int
iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	struct iwl_rxon_context *tmp;
	u32 interface_modes;
	int err;

	newtype = ieee80211_iftype_p2p(newtype, newp2p);

	mutex_lock(&priv->mutex);

	if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	/* modes the context supports at all, shared or exclusive */
	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;

	if (!(interface_modes & BIT(newtype))) {
		err = -EBUSY;
		goto out;
	}

	if (ctx->exclusive_interface_modes & BIT(newtype)) {
		for_each_context(priv, tmp) {
			if (ctx == tmp)
				continue;

			if (!tmp->vif)
				continue;

			/*
			 * The current mode switch would be exclusive, but
			 * another context is active ... refuse the switch.
			 */
			err = -EBUSY;
			goto out;
		}
	}

	/* success */
	iwl_legacy_teardown_interface(priv, vif, true);
	vif->type = newtype;
	vif->p2p = newp2p;
	err = iwl_legacy_setup_interface(priv, ctx);
	WARN_ON(err);
	/*
	 * We've switched internally, but submitting to the
	 * device may have failed for some reason. Mask this
	 * error, because otherwise mac80211 will not switch
	 * (and set the interface type back) and we'll be
	 * out of sync with it.
	 */
	err = 0;

 out:
	mutex_unlock(&priv->mutex);
	return err;
}
EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
1829
1830/*
1831 * On every watchdog tick we check (latest) time stamp. If it does not
1832 * change during timeout period and queue is not empty we reset firmware.
1833 */
1834static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
1835{
1836 struct iwl_tx_queue *txq = &priv->txq[cnt];
1837 struct iwl_queue *q = &txq->q;
1838 unsigned long timeout;
1839 int ret;
1840
1841 if (q->read_ptr == q->write_ptr) {
1842 txq->time_stamp = jiffies;
1843 return 0;
1844 }
1845
1846 timeout = txq->time_stamp +
1847 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1848
1849 if (time_after(jiffies, timeout)) {
1850 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1851 q->id, priv->cfg->base_params->wd_timeout);
1852 ret = iwl_legacy_force_reset(priv, false);
1853 return (ret == -EAGAIN) ? 0 : 1;
1854 }
1855
1856 return 0;
1857}
1858
1859/*
1860 * Making watchdog tick be a quarter of timeout assure we will
1861 * discover the queue hung between timeout and 1.25*timeout
1862 */
1863#define IWL_WD_TICK(timeout) ((timeout) / 4)
1864
1865/*
1866 * Watchdog timer callback, we check each tx queue for stuck, if if hung
1867 * we reset the firmware. If everything is fine just rearm the timer.
1868 */
1869void iwl_legacy_bg_watchdog(unsigned long data)
1870{
1871 struct iwl_priv *priv = (struct iwl_priv *)data;
1872 int cnt;
1873 unsigned long timeout;
1874
1875 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1876 return;
1877
1878 timeout = priv->cfg->base_params->wd_timeout;
1879 if (timeout == 0)
1880 return;
1881
1882 /* monitor and check for stuck cmd queue */
1883 if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
1884 return;
1885
1886 /* monitor and check for other stuck queues */
1887 if (iwl_legacy_is_any_associated(priv)) {
1888 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1889 /* skip as we already checked the command queue */
1890 if (cnt == priv->cmd_queue)
1891 continue;
1892 if (iwl_legacy_check_stuck_queue(priv, cnt))
1893 return;
1894 }
1895 }
1896
1897 mod_timer(&priv->watchdog, jiffies +
1898 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1899}
1900EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
1901
1902void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
1903{
1904 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1905
1906 if (timeout)
1907 mod_timer(&priv->watchdog,
1908 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1909 else
1910 del_timer(&priv->watchdog);
1911}
1912EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
1913
1914/*
1915 * extended beacon time format
1916 * time in usec will be changed into a 32-bit value in extended:internal format
1917 * the extended part is the beacon counts
1918 * the internal part is the time in usec within one beacon interval
1919 */
1920u32
1921iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
1922 u32 usec, u32 beacon_interval)
1923{
1924 u32 quot;
1925 u32 rem;
1926 u32 interval = beacon_interval * TIME_UNIT;
1927
1928 if (!interval || !usec)
1929 return 0;
1930
1931 quot = (usec / interval) &
1932 (iwl_legacy_beacon_time_mask_high(priv,
1933 priv->hw_params.beacon_time_tsf_bits) >>
1934 priv->hw_params.beacon_time_tsf_bits);
1935 rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
1936 priv->hw_params.beacon_time_tsf_bits);
1937
1938 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
1939}
1940EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
1941
/* base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down
 */
__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
			u32 addon, u32 beacon_interval)
{
	/*
	 * Combine two values in the extended:internal beacon time format
	 * (high bits = beacon count, low bits = usec within one beacon
	 * interval): the high parts are summed directly, while the low
	 * parts are combined with a carry into the beacon count when
	 * they wrap past one interval.
	 */
	u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
					priv->hw_params.beacon_time_tsf_bits)) +
				(addon & iwl_legacy_beacon_time_mask_high(priv,
					priv->hw_params.beacon_time_tsf_bits));

	/* NOTE(review): low parts are subtracted, not added, because the
	 * HW timer counts down (see header comment) -- the three cases
	 * below handle the subtraction with/without interval wrap. */
	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << priv->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
1969
1970#ifdef CONFIG_PM
1971
/*
 * PM suspend callback (also used for freeze and poweroff transitions,
 * see iwl_legacy_pm_ops).  Always succeeds.
 */
int iwl_legacy_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct iwl_priv *priv = pci_get_drvdata(pdev);

	/*
	 * This function is called when system goes into suspend state
	 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
	 * first but since iwl_mac_stop() has no knowledge of who the caller is,
	 * it will not call apm_ops.stop() to stop the DMA operation.
	 * Calling apm_ops.stop here to make sure we stop the DMA.
	 */
	iwl_legacy_apm_stop(priv);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_pci_suspend);
1989
1990int iwl_legacy_pci_resume(struct device *device)
1991{
1992 struct pci_dev *pdev = to_pci_dev(device);
1993 struct iwl_priv *priv = pci_get_drvdata(pdev);
1994 bool hw_rfkill = false;
1995
1996 /*
1997 * We disable the RETRY_TIMEOUT register (0x41) to keep
1998 * PCI Tx retries from interfering with C3 CPU state.
1999 */
2000 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2001
2002 iwl_legacy_enable_interrupts(priv);
2003
2004 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2005 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2006 hw_rfkill = true;
2007
2008 if (hw_rfkill)
2009 set_bit(STATUS_RF_KILL_HW, &priv->status);
2010 else
2011 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2012
2013 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2014
2015 return 0;
2016}
2017EXPORT_SYMBOL(iwl_legacy_pci_resume);
2018
/*
 * Power-management callbacks shared by all legacy devices: suspend and
 * resume are reused for every sleep transition (suspend-to-RAM,
 * hibernate freeze/thaw, poweroff/restore).
 */
const struct dev_pm_ops iwl_legacy_pm_ops = {
	.suspend = iwl_legacy_pci_suspend,
	.resume = iwl_legacy_pci_resume,
	.freeze = iwl_legacy_pci_suspend,
	.thaw = iwl_legacy_pci_resume,
	.poweroff = iwl_legacy_pci_suspend,
	.restore = iwl_legacy_pci_resume,
};
EXPORT_SYMBOL(iwl_legacy_pm_ops);
2028
2029#endif /* CONFIG_PM */
2030
2031static void
2032iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
2033{
2034 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2035 return;
2036
2037 if (!ctx->is_active)
2038 return;
2039
2040 ctx->qos_data.def_qos_parm.qos_flags = 0;
2041
2042 if (ctx->qos_data.qos_active)
2043 ctx->qos_data.def_qos_parm.qos_flags |=
2044 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2045
2046 if (ctx->ht.enabled)
2047 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
2048
2049 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2050 ctx->qos_data.qos_active,
2051 ctx->qos_data.def_qos_parm.qos_flags);
2052
2053 iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
2054 sizeof(struct iwl_qosparam_cmd),
2055 &ctx->qos_data.def_qos_parm, NULL);
2056}
2057
2058/**
2059 * iwl_legacy_mac_config - mac80211 config callback
2060 */
2061int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
2062{
2063 struct iwl_priv *priv = hw->priv;
2064 const struct iwl_channel_info *ch_info;
2065 struct ieee80211_conf *conf = &hw->conf;
2066 struct ieee80211_channel *channel = conf->channel;
2067 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2068 struct iwl_rxon_context *ctx;
2069 unsigned long flags = 0;
2070 int ret = 0;
2071 u16 ch;
2072 int scan_active = 0;
2073 bool ht_changed[NUM_IWL_RXON_CTX] = {};
2074
2075 if (WARN_ON(!priv->cfg->ops->legacy))
2076 return -EOPNOTSUPP;
2077
2078 mutex_lock(&priv->mutex);
2079
2080 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2081 channel->hw_value, changed);
2082
2083 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
2084 scan_active = 1;
2085 IWL_DEBUG_MAC80211(priv, "scan active\n");
2086 }
2087
2088 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2089 IEEE80211_CONF_CHANGE_CHANNEL)) {
2090 /* mac80211 uses static for non-HT which is what we want */
2091 priv->current_ht_config.smps = conf->smps_mode;
2092
2093 /*
2094 * Recalculate chain counts.
2095 *
2096 * If monitor mode is enabled then mac80211 will
2097 * set up the SM PS mode to OFF if an HT channel is
2098 * configured.
2099 */
2100 if (priv->cfg->ops->hcmd->set_rxon_chain)
2101 for_each_context(priv, ctx)
2102 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2103 }
2104
2105 /* during scanning mac80211 will delay channel setting until
2106 * scan finish with changed = 0
2107 */
2108 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2109 if (scan_active)
2110 goto set_ch_out;
2111
2112 ch = channel->hw_value;
2113 ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
2114 if (!iwl_legacy_is_channel_valid(ch_info)) {
2115 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2116 ret = -EINVAL;
2117 goto set_ch_out;
2118 }
2119
2120 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2121 !iwl_legacy_is_channel_ibss(ch_info)) {
2122 IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
2123 ret = -EINVAL;
2124 goto set_ch_out;
2125 }
2126
2127 spin_lock_irqsave(&priv->lock, flags);
2128
2129 for_each_context(priv, ctx) {
2130 /* Configure HT40 channels */
2131 if (ctx->ht.enabled != conf_is_ht(conf)) {
2132 ctx->ht.enabled = conf_is_ht(conf);
2133 ht_changed[ctx->ctxid] = true;
2134 }
2135 if (ctx->ht.enabled) {
2136 if (conf_is_ht40_minus(conf)) {
2137 ctx->ht.extension_chan_offset =
2138 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2139 ctx->ht.is_40mhz = true;
2140 } else if (conf_is_ht40_plus(conf)) {
2141 ctx->ht.extension_chan_offset =
2142 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2143 ctx->ht.is_40mhz = true;
2144 } else {
2145 ctx->ht.extension_chan_offset =
2146 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2147 ctx->ht.is_40mhz = false;
2148 }
2149 } else
2150 ctx->ht.is_40mhz = false;
2151
2152 /*
2153 * Default to no protection. Protection mode will
2154 * later be set from BSS config in iwl_ht_conf
2155 */
2156 ctx->ht.protection =
2157 IEEE80211_HT_OP_MODE_PROTECTION_NONE;
2158
2159 /* if we are switching from ht to 2.4 clear flags
2160 * from any ht related info since 2.4 does not
2161 * support ht */
2162 if ((le16_to_cpu(ctx->staging.channel) != ch))
2163 ctx->staging.flags = 0;
2164
2165 iwl_legacy_set_rxon_channel(priv, channel, ctx);
2166 iwl_legacy_set_rxon_ht(priv, ht_conf);
2167
2168 iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
2169 ctx->vif);
2170 }
2171
2172 spin_unlock_irqrestore(&priv->lock, flags);
2173
2174 if (priv->cfg->ops->legacy->update_bcast_stations)
2175 ret =
2176 priv->cfg->ops->legacy->update_bcast_stations(priv);
2177
2178 set_ch_out:
2179 /* The list of supported rates and rate mask can be different
2180 * for each band; since the band may have changed, reset
2181 * the rate mask to what mac80211 lists */
2182 iwl_legacy_set_rate(priv);
2183 }
2184
2185 if (changed & (IEEE80211_CONF_CHANGE_PS |
2186 IEEE80211_CONF_CHANGE_IDLE)) {
2187 ret = iwl_legacy_power_update_mode(priv, false);
2188 if (ret)
2189 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
2190 }
2191
2192 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2193 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2194 priv->tx_power_user_lmt, conf->power_level);
2195
2196 iwl_legacy_set_tx_power(priv, conf->power_level, false);
2197 }
2198
2199 if (!iwl_legacy_is_ready(priv)) {
2200 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2201 goto out;
2202 }
2203
2204 if (scan_active)
2205 goto out;
2206
2207 for_each_context(priv, ctx) {
2208 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
2209 iwl_legacy_commit_rxon(priv, ctx);
2210 else
2211 IWL_DEBUG_INFO(priv,
2212 "Not re-sending same RXON configuration.\n");
2213 if (ht_changed[ctx->ctxid])
2214 iwl_legacy_update_qos(priv, ctx);
2215 }
2216
2217out:
2218 IWL_DEBUG_MAC80211(priv, "leave\n");
2219 mutex_unlock(&priv->mutex);
2220 return ret;
2221}
2222EXPORT_SYMBOL(iwl_legacy_mac_config);
2223
2224void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
2225 struct ieee80211_vif *vif)
2226{
2227 struct iwl_priv *priv = hw->priv;
2228 unsigned long flags;
2229 /* IBSS can only be the IWL_RXON_CTX_BSS context */
2230 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2231
2232 if (WARN_ON(!priv->cfg->ops->legacy))
2233 return;
2234
2235 mutex_lock(&priv->mutex);
2236 IWL_DEBUG_MAC80211(priv, "enter\n");
2237
2238 spin_lock_irqsave(&priv->lock, flags);
2239 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2240 spin_unlock_irqrestore(&priv->lock, flags);
2241
2242 spin_lock_irqsave(&priv->lock, flags);
2243
2244 /* new association get rid of ibss beacon skb */
2245 if (priv->beacon_skb)
2246 dev_kfree_skb(priv->beacon_skb);
2247
2248 priv->beacon_skb = NULL;
2249
2250 priv->timestamp = 0;
2251
2252 spin_unlock_irqrestore(&priv->lock, flags);
2253
2254 iwl_legacy_scan_cancel_timeout(priv, 100);
2255 if (!iwl_legacy_is_ready_rf(priv)) {
2256 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2257 mutex_unlock(&priv->mutex);
2258 return;
2259 }
2260
2261 /* we are restarting association process
2262 * clear RXON_FILTER_ASSOC_MSK bit
2263 */
2264 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2265 iwl_legacy_commit_rxon(priv, ctx);
2266
2267 iwl_legacy_set_rate(priv);
2268
2269 mutex_unlock(&priv->mutex);
2270
2271 IWL_DEBUG_MAC80211(priv, "leave\n");
2272}
2273EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
2274
2275static void iwl_legacy_ht_conf(struct iwl_priv *priv,
2276 struct ieee80211_vif *vif)
2277{
2278 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2279 struct ieee80211_sta *sta;
2280 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
2281 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2282
2283 IWL_DEBUG_ASSOC(priv, "enter:\n");
2284
2285 if (!ctx->ht.enabled)
2286 return;
2287
2288 ctx->ht.protection =
2289 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
2290 ctx->ht.non_gf_sta_present =
2291 !!(bss_conf->ht_operation_mode &
2292 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
2293
2294 ht_conf->single_chain_sufficient = false;
2295
2296 switch (vif->type) {
2297 case NL80211_IFTYPE_STATION:
2298 rcu_read_lock();
2299 sta = ieee80211_find_sta(vif, bss_conf->bssid);
2300 if (sta) {
2301 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2302 int maxstreams;
2303
2304 maxstreams = (ht_cap->mcs.tx_params &
2305 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
2306 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2307 maxstreams += 1;
2308
2309 if ((ht_cap->mcs.rx_mask[1] == 0) &&
2310 (ht_cap->mcs.rx_mask[2] == 0))
2311 ht_conf->single_chain_sufficient = true;
2312 if (maxstreams <= 1)
2313 ht_conf->single_chain_sufficient = true;
2314 } else {
2315 /*
2316 * If at all, this can only happen through a race
2317 * when the AP disconnects us while we're still
2318 * setting up the connection, in that case mac80211
2319 * will soon tell us about that.
2320 */
2321 ht_conf->single_chain_sufficient = true;
2322 }
2323 rcu_read_unlock();
2324 break;
2325 case NL80211_IFTYPE_ADHOC:
2326 ht_conf->single_chain_sufficient = true;
2327 break;
2328 default:
2329 break;
2330 }
2331
2332 IWL_DEBUG_ASSOC(priv, "leave\n");
2333}
2334
2335static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
2336 struct ieee80211_vif *vif)
2337{
2338 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2339
2340 /*
2341 * inform the ucode that there is no longer an
2342 * association and that no more packets should be
2343 * sent
2344 */
2345 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2346 ctx->staging.assoc_id = 0;
2347 iwl_legacy_commit_rxon(priv, ctx);
2348}
2349
2350static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
2351 struct ieee80211_vif *vif)
2352{
2353 struct iwl_priv *priv = hw->priv;
2354 unsigned long flags;
2355 __le64 timestamp;
2356 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
2357
2358 if (!skb)
2359 return;
2360
2361 IWL_DEBUG_MAC80211(priv, "enter\n");
2362
2363 lockdep_assert_held(&priv->mutex);
2364
2365 if (!priv->beacon_ctx) {
2366 IWL_ERR(priv, "update beacon but no beacon context!\n");
2367 dev_kfree_skb(skb);
2368 return;
2369 }
2370
2371 spin_lock_irqsave(&priv->lock, flags);
2372
2373 if (priv->beacon_skb)
2374 dev_kfree_skb(priv->beacon_skb);
2375
2376 priv->beacon_skb = skb;
2377
2378 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
2379 priv->timestamp = le64_to_cpu(timestamp);
2380
2381 IWL_DEBUG_MAC80211(priv, "leave\n");
2382 spin_unlock_irqrestore(&priv->lock, flags);
2383
2384 if (!iwl_legacy_is_ready_rf(priv)) {
2385 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2386 return;
2387 }
2388
2389 priv->cfg->ops->legacy->post_associate(priv);
2390}
2391
/*
 * mac80211 bss_info_changed callback: propagate BSS configuration
 * changes (QoS, beaconing, BSSID, ERP preamble/CTS protection, HT,
 * association state and IBSS membership) into the staging RXON and
 * device state, committing or syncing RXON where required.
 */
void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u32 changes)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	int ret;

	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);

	mutex_lock(&priv->mutex);

	if (!iwl_legacy_is_alive(priv)) {
		mutex_unlock(&priv->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		iwl_legacy_update_qos(priv, ctx);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * the add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			priv->beacon_ctx = ctx;
		else
			priv->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BSSID) {
		IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
			IWL_WARN(priv,
				"Aborted scan still in progress after 100ms\n");
			IWL_DEBUG_MAC80211(priv,
				"leaving - scan abort failed.\n");
			mutex_unlock(&priv->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);

			/* currently needed in a few places */
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			ctx->staging.filter_flags &=
				~RXON_FILTER_ASSOC_MSK;
		}

	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
		iwl_legacy_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
				   bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		IWL_DEBUG_MAC80211(priv,
			"ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG (11g) protection only applies outside the 5 GHz band */
		if (bss_conf->use_cts_prot &&
			(priv->band != IEEE80211_BAND_5GHZ))
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from iwl_legacy_set_rate() and put something
		 * like this here:
		 *
		if (A-band)
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates;
		else
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates >> 4;
			ctx->staging.cck_basic_rates =
				bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		iwl_legacy_ht_conf(priv, vif);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->timestamp;

			if (!iwl_legacy_is_rfkill(priv))
				priv->cfg->ops->legacy->post_associate(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	/* while associated, push any remaining staged change via RXON_ASSOC
	 * and mirror it into the active RXON on success */
	if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
				   changes);
		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active,
				&ctx->staging,
				sizeof(struct iwl_legacy_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
			priv->cfg->ops->legacy->config_ap(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
							bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
2567
/*
 * iwl_legacy_isr - top-half interrupt handler (possibly shared IRQ line)
 *
 * Disables (without acking) interrupts, reads CSR_INT/CSR_FH_INT_STATUS
 * to decide whether this device raised the interrupt, and if so defers
 * the real servicing to the irq tasklet, which re-enables interrupts
 * when done.  Returns IRQ_NONE when the interrupt was not ours.
 */
irqreturn_t iwl_legacy_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!priv)
		return IRQ_NONE;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		IWL_DEBUG_ISR(priv,
			"Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	/* all-ones / 0xa5a5a5a0 patterns indicate reads from a device
	 * that has dropped off the bus (e.g. surprise removal) */
	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	/* drop the scheduler bit before deciding whether the tasklet
	 * has any work to do */
	inta &= ~CSR_INT_BIT_SCD;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&priv->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_legacy_isr);
2628
2629/*
2630 * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
2631 * function.
2632 */
2633void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
2634 struct ieee80211_tx_info *info,
2635 __le16 fc, __le32 *tx_flags)
2636{
2637 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2638 *tx_flags |= TX_CMD_FLG_RTS_MSK;
2639 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2640 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2641
2642 if (!ieee80211_is_mgmt(fc))
2643 return;
2644
2645 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2646 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2647 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2648 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2649 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2650 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2651 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2652 break;
2653 }
2654 } else if (info->control.rates[0].flags &
2655 IEEE80211_TX_RC_USE_CTS_PROTECT) {
2656 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2657 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2658 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2659 }
2660}
2661EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
deleted file mode 100644
index d1271fe07d4b..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ /dev/null
@@ -1,636 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_core_h__
64#define __iwl_legacy_core_h__
65
66/************************
67 * forward declarations *
68 ************************/
69struct iwl_host_cmd;
70struct iwl_cmd;
71
72
73#define IWLWIFI_VERSION "in-tree:"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
75#define DRV_AUTHOR "<ilw@linux.intel.com>"
76
77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
79 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
80 .driver_data = (kernel_ulong_t)&(cfg)
81
82#define TIME_UNIT 1024
83
84#define IWL_SKU_G 0x1
85#define IWL_SKU_A 0x2
86#define IWL_SKU_N 0x8
87
88#define IWL_CMD(x) case x: return #x
89
/* Per-device host-command hooks, reached via priv->cfg->ops->hcmd. */
struct iwl_hcmd_ops {
	/* send the RXON_ASSOC host command for this context */
	int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
	/* commit the staged RXON configuration to the device */
	int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
	/* select the RX chain configuration for this context */
	void (*set_rxon_chain)(struct iwl_priv *priv,
					struct iwl_rxon_context *ctx);
};
96
97struct iwl_hcmd_utils_ops {
98 u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
99 u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
100 u8 *data);
101 int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
102 void (*post_scan)(struct iwl_priv *priv);
103};
104
105struct iwl_apm_ops {
106 int (*init)(struct iwl_priv *priv);
107 void (*config)(struct iwl_priv *priv);
108};
109
110struct iwl_debugfs_ops {
111 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
112 size_t count, loff_t *ppos);
113 ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
114 size_t count, loff_t *ppos);
115 ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
116 size_t count, loff_t *ppos);
117};
118
119struct iwl_temp_ops {
120 void (*temperature)(struct iwl_priv *priv);
121};
122
123struct iwl_lib_ops {
124 /* set hw dependent parameters */
125 int (*set_hw_params)(struct iwl_priv *priv);
126 /* Handling TX */
127 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
128 struct iwl_tx_queue *txq,
129 u16 byte_cnt);
130 int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
131 struct iwl_tx_queue *txq,
132 dma_addr_t addr,
133 u16 len, u8 reset, u8 pad);
134 void (*txq_free_tfd)(struct iwl_priv *priv,
135 struct iwl_tx_queue *txq);
136 int (*txq_init)(struct iwl_priv *priv,
137 struct iwl_tx_queue *txq);
138 /* setup Rx handler */
139 void (*rx_handler_setup)(struct iwl_priv *priv);
140 /* alive notification after init uCode load */
141 void (*init_alive_start)(struct iwl_priv *priv);
142 /* check validity of rtc data address */
143 int (*is_valid_rtc_data_addr)(u32 addr);
144 /* 1st ucode load */
145 int (*load_ucode)(struct iwl_priv *priv);
146
147 void (*dump_nic_error_log)(struct iwl_priv *priv);
148 int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
149 int (*set_channel_switch)(struct iwl_priv *priv,
150 struct ieee80211_channel_switch *ch_switch);
151 /* power management */
152 struct iwl_apm_ops apm_ops;
153
154 /* power */
155 int (*send_tx_power) (struct iwl_priv *priv);
156 void (*update_chain_flags)(struct iwl_priv *priv);
157
158 /* eeprom operations (as defined in iwl-eeprom.h) */
159 struct iwl_eeprom_ops eeprom_ops;
160
161 /* temperature */
162 struct iwl_temp_ops temp_ops;
163
164 struct iwl_debugfs_ops debugfs_ops;
165
166};
167
168struct iwl_led_ops {
169 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
170};
171
172struct iwl_legacy_ops {
173 void (*post_associate)(struct iwl_priv *priv);
174 void (*config_ap)(struct iwl_priv *priv);
175 /* station management */
176 int (*update_bcast_stations)(struct iwl_priv *priv);
177 int (*manage_ibss_station)(struct iwl_priv *priv,
178 struct ieee80211_vif *vif, bool add);
179};
180
/*
 * Bundle of all per-device operation tables; referenced from
 * struct iwl_cfg (->ops) so each device family plugs in its own
 * implementations.
 */
struct iwl_ops {
	const struct iwl_lib_ops *lib;
	const struct iwl_hcmd_ops *hcmd;
	const struct iwl_hcmd_utils_ops *utils;
	const struct iwl_led_ops *led;
	const struct iwl_nic_ops *nic;
	const struct iwl_legacy_ops *legacy;
	const struct ieee80211_ops *ieee80211_ops;
};
190
191struct iwl_mod_params {
192 int sw_crypto; /* def: 0 = using hardware encryption */
193 int disable_hw_scan; /* def: 0 = use h/w scan */
194 int num_of_queues; /* def: HW dependent */
195 int disable_11n; /* def: 0 = 11n capabilities enabled */
196 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
197 int antenna; /* def: 0 = both antennas (use diversity) */
198 int restart_fw; /* def: 1 = restart firmware */
199};
200
201/*
202 * @led_compensation: compensate on the led on/off time per HW according
203 * to the deviation to achieve the desired led frequency.
204 * The detail algorithm is described in iwl-led.c
205 * @chain_noise_num_beacons: number of beacons used to compute chain noise
206 * @wd_timeout: TX queues watchdog timeout
207 * @temperature_kelvin: temperature report by uCode in kelvin
208 * @ucode_tracing: support ucode continuous tracing
209 * @sensitivity_calib_by_driver: driver has the capability to perform
210 * sensitivity calibration operation
211 * @chain_noise_calib_by_driver: driver has the capability to perform
212 * chain noise calibration operation
213 */
214struct iwl_base_params {
215 int eeprom_size;
216 int num_of_queues; /* def: HW dependent */
217 int num_of_ampdu_queues;/* def: HW dependent */
218 /* for iwl_legacy_apm_init() */
219 u32 pll_cfg_val;
220 bool set_l0s;
221 bool use_bsm;
222
223 u16 led_compensation;
224 int chain_noise_num_beacons;
225 unsigned int wd_timeout;
226 bool temperature_kelvin;
227 const bool ucode_tracing;
228 const bool sensitivity_calib_by_driver;
229 const bool chain_noise_calib_by_driver;
230};
231
232/**
233 * struct iwl_cfg
234 * @fw_name_pre: Firmware filename prefix. The api version and extension
235 * (.ucode) will be added to filename before loading from disk. The
236 * filename is constructed as fw_name_pre<api>.ucode.
237 * @ucode_api_max: Highest version of uCode API supported by driver.
238 * @ucode_api_min: Lowest version of uCode API supported by driver.
239 * @scan_antennas: available antenna for scan operation
240 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
241 *
242 * We enable the driver to be backward compatible wrt API version. The
243 * driver specifies which APIs it supports (with @ucode_api_max being the
244 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
245 * it has a supported API version. The firmware's API version will be
246 * stored in @iwl_priv, enabling the driver to make runtime changes based
247 * on firmware version used.
248 *
249 * For example,
250 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
251 * Driver interacts with Firmware API version >= 2.
252 * } else {
253 * Driver interacts with Firmware API version 1.
254 * }
255 *
256 * The ideal usage of this infrastructure is to treat a new ucode API
257 * release as a new hardware revision. That is, through utilizing the
258 * iwl_hcmd_utils_ops etc. we accommodate different command structures
259 * and flows between hardware versions as well as their API
260 * versions.
261 *
262 */
263struct iwl_cfg {
264 /* params specific to an individual device within a device family */
265 const char *name;
266 const char *fw_name_pre;
267 const unsigned int ucode_api_max;
268 const unsigned int ucode_api_min;
269 u8 valid_tx_ant;
270 u8 valid_rx_ant;
271 unsigned int sku;
272 u16 eeprom_ver;
273 u16 eeprom_calib_ver;
274 const struct iwl_ops *ops;
275 /* module based parameters which can be set from modprobe cmd */
276 const struct iwl_mod_params *mod_params;
277 /* params not likely to change within a device family */
278 struct iwl_base_params *base_params;
279 /* params likely to change within a device family */
280 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
281 enum iwl_led_mode led_mode;
282};
283
284/***************************
285 * L i b *
286 ***************************/
287
288struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
289int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
290 struct ieee80211_vif *vif, u16 queue,
291 const struct ieee80211_tx_queue_params *params);
292int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
293void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
294 struct iwl_rxon_context *ctx,
295 int hw_decrypt);
296int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
297 struct iwl_rxon_context *ctx);
298int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
299 struct iwl_rxon_context *ctx);
300int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
301 struct ieee80211_channel *ch,
302 struct iwl_rxon_context *ctx);
303void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
304 struct iwl_rxon_context *ctx,
305 enum ieee80211_band band,
306 struct ieee80211_vif *vif);
307u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
308 enum ieee80211_band band);
309void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
310 struct iwl_ht_config *ht_conf);
311bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
312 struct iwl_rxon_context *ctx,
313 struct ieee80211_sta_ht_cap *ht_cap);
314void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
315 struct iwl_rxon_context *ctx);
316void iwl_legacy_set_rate(struct iwl_priv *priv);
317int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
318 struct ieee80211_hdr *hdr,
319 u32 decrypt_res,
320 struct ieee80211_rx_status *stats);
321void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
322int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
323 struct ieee80211_vif *vif);
324void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
325 struct ieee80211_vif *vif);
326int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
327 struct ieee80211_vif *vif,
328 enum nl80211_iftype newtype, bool newp2p);
329int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
330void iwl_legacy_txq_mem(struct iwl_priv *priv);
331
332#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
333int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
334void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
335void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
336void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
337 u16 length, struct ieee80211_hdr *header);
338void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
339 u16 length, struct ieee80211_hdr *header);
340const char *iwl_legacy_get_mgmt_string(int cmd);
341const char *iwl_legacy_get_ctrl_string(int cmd);
342void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
343void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
344 u16 len);
345#else
346static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
347{
348 return 0;
349}
350static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
351{
352}
353static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
354{
355}
356static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
357 u16 length, struct ieee80211_hdr *header)
358{
359}
360static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
361 u16 length, struct ieee80211_hdr *header)
362{
363}
364static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
365 __le16 fc, u16 len)
366{
367}
368#endif
369/*****************************************************
370 * RX handlers.
371 * **************************************************/
372void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
373 struct iwl_rx_mem_buffer *rxb);
374void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
375 struct iwl_rx_mem_buffer *rxb);
376void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
377 struct iwl_rx_mem_buffer *rxb);
378
379/*****************************************************
380* RX
381******************************************************/
382void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
383void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
384int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
385void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
386 struct iwl_rx_queue *q);
387int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
388void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
389 struct iwl_rx_mem_buffer *rxb);
390/* Handlers */
391void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
392 struct iwl_rx_mem_buffer *rxb);
393void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
394 struct iwl_rx_packet *pkt);
395void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
396void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
397
398/* TX helpers */
399
400/*****************************************************
401* TX
402******************************************************/
403void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
404 struct iwl_tx_queue *txq);
405int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
406 int slots_num, u32 txq_id);
407void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
408 struct iwl_tx_queue *txq,
409 int slots_num, u32 txq_id);
410void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
411void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
412void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
413/*****************************************************
414 * TX power
415 ****************************************************/
416int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
417
418/*******************************************************************************
419 * Rate
420 ******************************************************************************/
421
422u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
423 struct iwl_rxon_context *ctx);
424
425/*******************************************************************************
426 * Scanning
427 ******************************************************************************/
428void iwl_legacy_init_scan_params(struct iwl_priv *priv);
429int iwl_legacy_scan_cancel(struct iwl_priv *priv);
430int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
431void iwl_legacy_force_scan_end(struct iwl_priv *priv);
432int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
433 struct ieee80211_vif *vif,
434 struct cfg80211_scan_request *req);
435void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
436int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
437u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
438 struct ieee80211_mgmt *frame,
439 const u8 *ta, const u8 *ie, int ie_len, int left);
440void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
441u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
442 enum ieee80211_band band,
443 u8 n_probes);
444u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
445 enum ieee80211_band band,
446 struct ieee80211_vif *vif);
447void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
448void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
449
450/* For faster active scanning, scan will move to the next channel if fewer than
451 * PLCP_QUIET_THRESH packets are heard on this channel within
452 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
453 * time if it's a quiet channel (nothing responded to our probe, and there's
454 * no other traffic).
455 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
456#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
457#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
458
459#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
460
461/*****************************************************
462 * S e n d i n g H o s t C o m m a n d s *
463 *****************************************************/
464
465const char *iwl_legacy_get_cmd_string(u8 cmd);
466int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
467 struct iwl_host_cmd *cmd);
468int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
469int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
470 u16 len, const void *data);
471int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
472 const void *data,
473 void (*callback)(struct iwl_priv *priv,
474 struct iwl_device_cmd *cmd,
475 struct iwl_rx_packet *pkt));
476
477int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
478
479
480/*****************************************************
481 * PCI *
482 *****************************************************/
483
484static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
485{
486 int pos;
487 u16 pci_lnk_ctl;
488 pos = pci_pcie_cap(priv->pci_dev);
489 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
490 return pci_lnk_ctl;
491}
492
493void iwl_legacy_bg_watchdog(unsigned long data);
494u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
495 u32 usec, u32 beacon_interval);
496__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
497 u32 addon, u32 beacon_interval);
498
499#ifdef CONFIG_PM
500int iwl_legacy_pci_suspend(struct device *device);
501int iwl_legacy_pci_resume(struct device *device);
502extern const struct dev_pm_ops iwl_legacy_pm_ops;
503
504#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
505
506#else /* !CONFIG_PM */
507
508#define IWL_LEGACY_PM_OPS NULL
509
510#endif /* !CONFIG_PM */
511
512/*****************************************************
513* Error Handling Debugging
514******************************************************/
515void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
516#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
517void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
518 struct iwl_rxon_context *ctx);
519#else
520static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
521 struct iwl_rxon_context *ctx)
522{
523}
524#endif
525
526void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
527
528/*****************************************************
529* GEOS
530******************************************************/
531int iwl_legacy_init_geos(struct iwl_priv *priv);
532void iwl_legacy_free_geos(struct iwl_priv *priv);
533
534/*************** DRIVER STATUS FUNCTIONS *****/
535
536#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
537/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
538#define STATUS_INT_ENABLED 2
539#define STATUS_RF_KILL_HW 3
540#define STATUS_CT_KILL 4
541#define STATUS_INIT 5
542#define STATUS_ALIVE 6
543#define STATUS_READY 7
544#define STATUS_TEMPERATURE 8
545#define STATUS_GEO_CONFIGURED 9
546#define STATUS_EXIT_PENDING 10
547#define STATUS_STATISTICS 12
548#define STATUS_SCANNING 13
549#define STATUS_SCAN_ABORTING 14
550#define STATUS_SCAN_HW 15
551#define STATUS_POWER_PMI 16
552#define STATUS_FW_ERROR 17
553#define STATUS_CHANNEL_SWITCH_PENDING 18
554
555static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
556{
557 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
558 * set but EXIT_PENDING is not */
559 return test_bit(STATUS_READY, &priv->status) &&
560 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
561 !test_bit(STATUS_EXIT_PENDING, &priv->status);
562}
563
/* Test the STATUS_ALIVE bit in priv->status. */
static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
{
	return test_bit(STATUS_ALIVE, &priv->status);
}
568
/* Test the STATUS_INIT bit in priv->status. */
static inline int iwl_legacy_is_init(struct iwl_priv *priv)
{
	return test_bit(STATUS_INIT, &priv->status);
}
573
/* Test the STATUS_RF_KILL_HW bit (hardware rfkill switch) in priv->status. */
static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
{
	return test_bit(STATUS_RF_KILL_HW, &priv->status);
}
578
/* rfkill state: only the hardware switch is considered here. */
static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
{
	return iwl_legacy_is_rfkill_hw(priv);
}
583
/* Test the STATUS_CT_KILL bit (thermal kill) in priv->status. */
static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
{
	return test_bit(STATUS_CT_KILL, &priv->status);
}
588
/* Ready for RF operation: not rfkilled, and the adapter itself is ready. */
static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
{
	return iwl_legacy_is_rfkill(priv) ? 0 : iwl_legacy_is_ready(priv);
}
597
598extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
599extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
600 u8 flags, bool clear);
601void iwl_legacy_apm_stop(struct iwl_priv *priv);
602int iwl_legacy_apm_init(struct iwl_priv *priv);
603
604int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
605 struct iwl_rxon_context *ctx);
/* Dispatch to the device-specific rxon_assoc hook. */
static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx)
{
	return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
}
/* Dispatch to the device-specific commit_rxon hook. */
static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx)
{
	return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
}
/* Return the wiphy band table for @band.
 * NOTE(review): presumably NULL for bands this device did not register —
 * callers should verify before dereferencing. */
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
			struct iwl_priv *priv, enum ieee80211_band band)
{
	return priv->hw->wiphy->bands[band];
}
621
622/* mac80211 handlers */
623int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
624void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
625 struct ieee80211_vif *vif);
626void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
627 struct ieee80211_vif *vif,
628 struct ieee80211_bss_conf *bss_conf,
629 u32 changes);
630void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
631 struct ieee80211_tx_info *info,
632 __le16 fc, __le32 *tx_flags);
633
634irqreturn_t iwl_legacy_isr(int irq, void *data);
635
636#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
deleted file mode 100644
index ae13112701bf..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debug.h
+++ /dev/null
@@ -1,198 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_debug_h__
30#define __iwl_legacy_debug_h__
31
32struct iwl_priv;
33extern u32 iwlegacy_debug_level;
34
35#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
36#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
37#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
38#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
39
40#define iwl_print_hex_error(priv, p, len) \
41do { \
42 print_hex_dump(KERN_ERR, "iwl data: ", \
43 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
44} while (0)
45
46#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
47#define IWL_DEBUG(__priv, level, fmt, args...) \
48do { \
49 if (iwl_legacy_get_debug_level(__priv) & (level)) \
50 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
51 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
52 __func__ , ## args); \
53} while (0)
54
55#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
56do { \
57 if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
58 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
59 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
60 __func__ , ## args); \
61} while (0)
62
63#define iwl_print_hex_dump(priv, level, p, len) \
64do { \
65 if (iwl_legacy_get_debug_level(priv) & level) \
66 print_hex_dump(KERN_DEBUG, "iwl data: ", \
67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
68} while (0)
69
70#else
71#define IWL_DEBUG(__priv, level, fmt, args...)
72#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
73static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
74 const void *p, u32 len)
75{}
76#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
77
78#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
79int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
80void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
81#else
82static inline int
83iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
84{
85 return 0;
86}
87static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
88{
89}
90#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
91
92/*
93 * To use the debug system:
94 *
95 * If you are defining a new debug classification, simply add it to the #define
96 * list here in the form of
97 *
98 * #define IWL_DL_xxxx VALUE
99 *
100 * where xxxx should be the name of the classification (for example, WEP).
101 *
102 * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
103 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
104 * to send output to that classification.
105 *
106 * The active debug levels can be accessed via files
107 *
108 * /sys/module/iwl4965/parameters/debug{50}
109 * /sys/module/iwl3945/parameters/debug
110 * /sys/class/net/wlan0/device/debug_level
111 *
112 * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
113 */
114
115/* 0x0000000F - 0x00000001 */
116#define IWL_DL_INFO (1 << 0)
117#define IWL_DL_MAC80211 (1 << 1)
118#define IWL_DL_HCMD (1 << 2)
119#define IWL_DL_STATE (1 << 3)
120/* 0x000000F0 - 0x00000010 */
121#define IWL_DL_MACDUMP (1 << 4)
122#define IWL_DL_HCMD_DUMP (1 << 5)
123#define IWL_DL_EEPROM (1 << 6)
124#define IWL_DL_RADIO (1 << 7)
125/* 0x00000F00 - 0x00000100 */
126#define IWL_DL_POWER (1 << 8)
127#define IWL_DL_TEMP (1 << 9)
128#define IWL_DL_NOTIF (1 << 10)
129#define IWL_DL_SCAN (1 << 11)
130/* 0x0000F000 - 0x00001000 */
131#define IWL_DL_ASSOC (1 << 12)
132#define IWL_DL_DROP (1 << 13)
133#define IWL_DL_TXPOWER (1 << 14)
134#define IWL_DL_AP (1 << 15)
135/* 0x000F0000 - 0x00010000 */
136#define IWL_DL_FW (1 << 16)
137#define IWL_DL_RF_KILL (1 << 17)
138#define IWL_DL_FW_ERRORS (1 << 18)
139#define IWL_DL_LED (1 << 19)
140/* 0x00F00000 - 0x00100000 */
141#define IWL_DL_RATE (1 << 20)
142#define IWL_DL_CALIB (1 << 21)
143#define IWL_DL_WEP (1 << 22)
144#define IWL_DL_TX (1 << 23)
145/* 0x0F000000 - 0x01000000 */
146#define IWL_DL_RX (1 << 24)
147#define IWL_DL_ISR (1 << 25)
148#define IWL_DL_HT (1 << 26)
149#define IWL_DL_IO (1 << 27)
150/* 0xF0000000 - 0x10000000 */
151#define IWL_DL_11H (1 << 28)
152#define IWL_DL_STATS (1 << 29)
153#define IWL_DL_TX_REPLY (1 << 30)
154#define IWL_DL_QOS (1 << 31)
155
156#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
157#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
158#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
159#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
160#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
161#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
162#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
163#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
164#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
165#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
166#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
167#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
168#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
169#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
170#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
171#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
172#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
173#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
174 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
175#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
176#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
177#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
178#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
179#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
180 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
181#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
182#define IWL_DEBUG_ASSOC(p, f, a...) \
183 IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
184#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
185 IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
186#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
187#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
188#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
189 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
190#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
191#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
192 IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
193#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
194#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
195#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
196#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
197
198#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
deleted file mode 100644
index 996996a71657..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debugfs.c
+++ /dev/null
@@ -1,1313 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <net/mac80211.h>
30
31
32#include "iwl-dev.h"
33#include "iwl-debug.h"
34#include "iwl-core.h"
35#include "iwl-io.h"
36
37/* create and remove of files */
38#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
39 if (!debugfs_create_file(#name, mode, parent, priv, \
40 &iwl_legacy_dbgfs_##name##_ops)) \
41 goto err; \
42} while (0)
43
44#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
45 struct dentry *__tmp; \
46 __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
47 parent, ptr); \
48 if (IS_ERR(__tmp) || !__tmp) \
49 goto err; \
50} while (0)
51
52#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
53 struct dentry *__tmp; \
54 __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
55 parent, ptr); \
56 if (IS_ERR(__tmp) || !__tmp) \
57 goto err; \
58} while (0)
59
60/* file operation */
61#define DEBUGFS_READ_FUNC(name) \
62static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
63 char __user *user_buf, \
64 size_t count, loff_t *ppos);
65
66#define DEBUGFS_WRITE_FUNC(name) \
67static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
68 const char __user *user_buf, \
69 size_t count, loff_t *ppos);
70
71
72static int
73iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
74{
75 file->private_data = inode->i_private;
76 return 0;
77}
78
79#define DEBUGFS_READ_FILE_OPS(name) \
80 DEBUGFS_READ_FUNC(name); \
81static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
82 .read = iwl_legacy_dbgfs_##name##_read, \
83 .open = iwl_legacy_dbgfs_open_file_generic, \
84 .llseek = generic_file_llseek, \
85};
86
87#define DEBUGFS_WRITE_FILE_OPS(name) \
88 DEBUGFS_WRITE_FUNC(name); \
89static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
90 .write = iwl_legacy_dbgfs_##name##_write, \
91 .open = iwl_legacy_dbgfs_open_file_generic, \
92 .llseek = generic_file_llseek, \
93};
94
95#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
96 DEBUGFS_READ_FUNC(name); \
97 DEBUGFS_WRITE_FUNC(name); \
98static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
99 .write = iwl_legacy_dbgfs_##name##_write, \
100 .read = iwl_legacy_dbgfs_##name##_read, \
101 .open = iwl_legacy_dbgfs_open_file_generic, \
102 .llseek = generic_file_llseek, \
103};
104
105static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
106 char __user *user_buf,
107 size_t count, loff_t *ppos) {
108
109 struct iwl_priv *priv = file->private_data;
110 char *buf;
111 int pos = 0;
112
113 int cnt;
114 ssize_t ret;
115 const size_t bufsz = 100 +
116 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
117 buf = kzalloc(bufsz, GFP_KERNEL);
118 if (!buf)
119 return -ENOMEM;
120 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
121 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
122 pos += scnprintf(buf + pos, bufsz - pos,
123 "\t%25s\t\t: %u\n",
124 iwl_legacy_get_mgmt_string(cnt),
125 priv->tx_stats.mgmt[cnt]);
126 }
127 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
128 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
129 pos += scnprintf(buf + pos, bufsz - pos,
130 "\t%25s\t\t: %u\n",
131 iwl_legacy_get_ctrl_string(cnt),
132 priv->tx_stats.ctrl[cnt]);
133 }
134 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
135 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
136 priv->tx_stats.data_cnt);
137 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
138 priv->tx_stats.data_bytes);
139 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
140 kfree(buf);
141 return ret;
142}
143
144static ssize_t
145iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
146 const char __user *user_buf,
147 size_t count, loff_t *ppos)
148{
149 struct iwl_priv *priv = file->private_data;
150 u32 clear_flag;
151 char buf[8];
152 int buf_size;
153
154 memset(buf, 0, sizeof(buf));
155 buf_size = min(count, sizeof(buf) - 1);
156 if (copy_from_user(buf, user_buf, buf_size))
157 return -EFAULT;
158 if (sscanf(buf, "%x", &clear_flag) != 1)
159 return -EFAULT;
160 iwl_legacy_clear_traffic_stats(priv);
161
162 return count;
163}
164
165static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
166 char __user *user_buf,
167 size_t count, loff_t *ppos) {
168
169 struct iwl_priv *priv = file->private_data;
170 char *buf;
171 int pos = 0;
172 int cnt;
173 ssize_t ret;
174 const size_t bufsz = 100 +
175 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
176 buf = kzalloc(bufsz, GFP_KERNEL);
177 if (!buf)
178 return -ENOMEM;
179
180 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
181 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
182 pos += scnprintf(buf + pos, bufsz - pos,
183 "\t%25s\t\t: %u\n",
184 iwl_legacy_get_mgmt_string(cnt),
185 priv->rx_stats.mgmt[cnt]);
186 }
187 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
188 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
189 pos += scnprintf(buf + pos, bufsz - pos,
190 "\t%25s\t\t: %u\n",
191 iwl_legacy_get_ctrl_string(cnt),
192 priv->rx_stats.ctrl[cnt]);
193 }
194 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
195 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
196 priv->rx_stats.data_cnt);
197 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
198 priv->rx_stats.data_bytes);
199
200 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
201 kfree(buf);
202 return ret;
203}
204
205#define BYTE1_MASK 0x000000ff;
206#define BYTE2_MASK 0x0000ffff;
207#define BYTE3_MASK 0x00ffffff;
208static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
209 char __user *user_buf,
210 size_t count, loff_t *ppos)
211{
212 u32 val;
213 char *buf;
214 ssize_t ret;
215 int i;
216 int pos = 0;
217 struct iwl_priv *priv = file->private_data;
218 size_t bufsz;
219
220 /* default is to dump the entire data segment */
221 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
222 priv->dbgfs_sram_offset = 0x800000;
223 if (priv->ucode_type == UCODE_INIT)
224 priv->dbgfs_sram_len = priv->ucode_init_data.len;
225 else
226 priv->dbgfs_sram_len = priv->ucode_data.len;
227 }
228 bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
229 buf = kmalloc(bufsz, GFP_KERNEL);
230 if (!buf)
231 return -ENOMEM;
232 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
233 priv->dbgfs_sram_len);
234 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
235 priv->dbgfs_sram_offset);
236 for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
237 val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
238 priv->dbgfs_sram_len - i);
239 if (i < 4) {
240 switch (i) {
241 case 1:
242 val &= BYTE1_MASK;
243 break;
244 case 2:
245 val &= BYTE2_MASK;
246 break;
247 case 3:
248 val &= BYTE3_MASK;
249 break;
250 }
251 }
252 if (!(i % 16))
253 pos += scnprintf(buf + pos, bufsz - pos, "\n");
254 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
255 }
256 pos += scnprintf(buf + pos, bufsz - pos, "\n");
257
258 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
259 kfree(buf);
260 return ret;
261}
262
263static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
264 const char __user *user_buf,
265 size_t count, loff_t *ppos)
266{
267 struct iwl_priv *priv = file->private_data;
268 char buf[64];
269 int buf_size;
270 u32 offset, len;
271
272 memset(buf, 0, sizeof(buf));
273 buf_size = min(count, sizeof(buf) - 1);
274 if (copy_from_user(buf, user_buf, buf_size))
275 return -EFAULT;
276
277 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
278 priv->dbgfs_sram_offset = offset;
279 priv->dbgfs_sram_len = len;
280 } else {
281 priv->dbgfs_sram_offset = 0;
282 priv->dbgfs_sram_len = 0;
283 }
284
285 return count;
286}
287
288static ssize_t
289iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
290 size_t count, loff_t *ppos)
291{
292 struct iwl_priv *priv = file->private_data;
293 struct iwl_station_entry *station;
294 int max_sta = priv->hw_params.max_stations;
295 char *buf;
296 int i, j, pos = 0;
297 ssize_t ret;
298 /* Add 30 for initial string */
299 const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
300
301 buf = kmalloc(bufsz, GFP_KERNEL);
302 if (!buf)
303 return -ENOMEM;
304
305 pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
306 priv->num_stations);
307
308 for (i = 0; i < max_sta; i++) {
309 station = &priv->stations[i];
310 if (!station->used)
311 continue;
312 pos += scnprintf(buf + pos, bufsz - pos,
313 "station %d - addr: %pM, flags: %#x\n",
314 i, station->sta.sta.addr,
315 station->sta.station_flags_msk);
316 pos += scnprintf(buf + pos, bufsz - pos,
317 "TID\tseq_num\ttxq_id\tframes\ttfds\t");
318 pos += scnprintf(buf + pos, bufsz - pos,
319 "start_idx\tbitmap\t\t\trate_n_flags\n");
320
321 for (j = 0; j < MAX_TID_COUNT; j++) {
322 pos += scnprintf(buf + pos, bufsz - pos,
323 "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
324 j, station->tid[j].seq_number,
325 station->tid[j].agg.txq_id,
326 station->tid[j].agg.frame_count,
327 station->tid[j].tfds_in_queue,
328 station->tid[j].agg.start_idx,
329 station->tid[j].agg.bitmap,
330 station->tid[j].agg.rate_n_flags);
331
332 if (station->tid[j].agg.wait_for_ba)
333 pos += scnprintf(buf + pos, bufsz - pos,
334 " - waitforba");
335 pos += scnprintf(buf + pos, bufsz - pos, "\n");
336 }
337
338 pos += scnprintf(buf + pos, bufsz - pos, "\n");
339 }
340
341 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
342 kfree(buf);
343 return ret;
344}
345
346static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
347 char __user *user_buf,
348 size_t count,
349 loff_t *ppos)
350{
351 ssize_t ret;
352 struct iwl_priv *priv = file->private_data;
353 int pos = 0, ofs = 0, buf_size = 0;
354 const u8 *ptr;
355 char *buf;
356 u16 eeprom_ver;
357 size_t eeprom_len = priv->cfg->base_params->eeprom_size;
358 buf_size = 4 * eeprom_len + 256;
359
360 if (eeprom_len % 16) {
361 IWL_ERR(priv, "NVM size is not multiple of 16.\n");
362 return -ENODATA;
363 }
364
365 ptr = priv->eeprom;
366 if (!ptr) {
367 IWL_ERR(priv, "Invalid EEPROM memory\n");
368 return -ENOMEM;
369 }
370
371 /* 4 characters for byte 0xYY */
372 buf = kzalloc(buf_size, GFP_KERNEL);
373 if (!buf) {
374 IWL_ERR(priv, "Can not allocate Buffer\n");
375 return -ENOMEM;
376 }
377 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
378 pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
379 "version: 0x%x\n", eeprom_ver);
380 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
381 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
382 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
383 buf_size - pos, 0);
384 pos += strlen(buf + pos);
385 if (buf_size - pos > 0)
386 buf[pos++] = '\n';
387 }
388
389 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
390 kfree(buf);
391 return ret;
392}
393
394static ssize_t
395iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
396 size_t count, loff_t *ppos)
397{
398 struct iwl_priv *priv = file->private_data;
399 struct ieee80211_channel *channels = NULL;
400 const struct ieee80211_supported_band *supp_band = NULL;
401 int pos = 0, i, bufsz = PAGE_SIZE;
402 char *buf;
403 ssize_t ret;
404
405 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
406 return -EAGAIN;
407
408 buf = kzalloc(bufsz, GFP_KERNEL);
409 if (!buf) {
410 IWL_ERR(priv, "Can not allocate Buffer\n");
411 return -ENOMEM;
412 }
413
414 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
415 if (supp_band) {
416 channels = supp_band->channels;
417
418 pos += scnprintf(buf + pos, bufsz - pos,
419 "Displaying %d channels in 2.4GHz band 802.11bg):\n",
420 supp_band->n_channels);
421
422 for (i = 0; i < supp_band->n_channels; i++)
423 pos += scnprintf(buf + pos, bufsz - pos,
424 "%d: %ddBm: BSS%s%s, %s.\n",
425 channels[i].hw_value,
426 channels[i].max_power,
427 channels[i].flags & IEEE80211_CHAN_RADAR ?
428 " (IEEE 802.11h required)" : "",
429 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
430 || (channels[i].flags &
431 IEEE80211_CHAN_RADAR)) ? "" :
432 ", IBSS",
433 channels[i].flags &
434 IEEE80211_CHAN_PASSIVE_SCAN ?
435 "passive only" : "active/passive");
436 }
437 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
438 if (supp_band) {
439 channels = supp_band->channels;
440
441 pos += scnprintf(buf + pos, bufsz - pos,
442 "Displaying %d channels in 5.2GHz band (802.11a)\n",
443 supp_band->n_channels);
444
445 for (i = 0; i < supp_band->n_channels; i++)
446 pos += scnprintf(buf + pos, bufsz - pos,
447 "%d: %ddBm: BSS%s%s, %s.\n",
448 channels[i].hw_value,
449 channels[i].max_power,
450 channels[i].flags & IEEE80211_CHAN_RADAR ?
451 " (IEEE 802.11h required)" : "",
452 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
453 || (channels[i].flags &
454 IEEE80211_CHAN_RADAR)) ? "" :
455 ", IBSS",
456 channels[i].flags &
457 IEEE80211_CHAN_PASSIVE_SCAN ?
458 "passive only" : "active/passive");
459 }
460 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
461 kfree(buf);
462 return ret;
463}
464
465static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
466 char __user *user_buf,
467 size_t count, loff_t *ppos) {
468
469 struct iwl_priv *priv = file->private_data;
470 char buf[512];
471 int pos = 0;
472 const size_t bufsz = sizeof(buf);
473
474 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
475 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
476 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
477 test_bit(STATUS_INT_ENABLED, &priv->status));
478 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
479 test_bit(STATUS_RF_KILL_HW, &priv->status));
480 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
481 test_bit(STATUS_CT_KILL, &priv->status));
482 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
483 test_bit(STATUS_INIT, &priv->status));
484 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
485 test_bit(STATUS_ALIVE, &priv->status));
486 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
487 test_bit(STATUS_READY, &priv->status));
488 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
489 test_bit(STATUS_TEMPERATURE, &priv->status));
490 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
491 test_bit(STATUS_GEO_CONFIGURED, &priv->status));
492 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
493 test_bit(STATUS_EXIT_PENDING, &priv->status));
494 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
495 test_bit(STATUS_STATISTICS, &priv->status));
496 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
497 test_bit(STATUS_SCANNING, &priv->status));
498 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
499 test_bit(STATUS_SCAN_ABORTING, &priv->status));
500 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
501 test_bit(STATUS_SCAN_HW, &priv->status));
502 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
503 test_bit(STATUS_POWER_PMI, &priv->status));
504 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
505 test_bit(STATUS_FW_ERROR, &priv->status));
506 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
507}
508
509static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
510 char __user *user_buf,
511 size_t count, loff_t *ppos) {
512
513 struct iwl_priv *priv = file->private_data;
514 int pos = 0;
515 int cnt = 0;
516 char *buf;
517 int bufsz = 24 * 64; /* 24 items * 64 char per item */
518 ssize_t ret;
519
520 buf = kzalloc(bufsz, GFP_KERNEL);
521 if (!buf) {
522 IWL_ERR(priv, "Can not allocate Buffer\n");
523 return -ENOMEM;
524 }
525
526 pos += scnprintf(buf + pos, bufsz - pos,
527 "Interrupt Statistics Report:\n");
528
529 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
530 priv->isr_stats.hw);
531 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
532 priv->isr_stats.sw);
533 if (priv->isr_stats.sw || priv->isr_stats.hw) {
534 pos += scnprintf(buf + pos, bufsz - pos,
535 "\tLast Restarting Code: 0x%X\n",
536 priv->isr_stats.err_code);
537 }
538#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
539 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
540 priv->isr_stats.sch);
541 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
542 priv->isr_stats.alive);
543#endif
544 pos += scnprintf(buf + pos, bufsz - pos,
545 "HW RF KILL switch toggled:\t %u\n",
546 priv->isr_stats.rfkill);
547
548 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
549 priv->isr_stats.ctkill);
550
551 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
552 priv->isr_stats.wakeup);
553
554 pos += scnprintf(buf + pos, bufsz - pos,
555 "Rx command responses:\t\t %u\n",
556 priv->isr_stats.rx);
557 for (cnt = 0; cnt < REPLY_MAX; cnt++) {
558 if (priv->isr_stats.rx_handlers[cnt] > 0)
559 pos += scnprintf(buf + pos, bufsz - pos,
560 "\tRx handler[%36s]:\t\t %u\n",
561 iwl_legacy_get_cmd_string(cnt),
562 priv->isr_stats.rx_handlers[cnt]);
563 }
564
565 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
566 priv->isr_stats.tx);
567
568 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
569 priv->isr_stats.unhandled);
570
571 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
572 kfree(buf);
573 return ret;
574}
575
576static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
577 const char __user *user_buf,
578 size_t count, loff_t *ppos)
579{
580 struct iwl_priv *priv = file->private_data;
581 char buf[8];
582 int buf_size;
583 u32 reset_flag;
584
585 memset(buf, 0, sizeof(buf));
586 buf_size = min(count, sizeof(buf) - 1);
587 if (copy_from_user(buf, user_buf, buf_size))
588 return -EFAULT;
589 if (sscanf(buf, "%x", &reset_flag) != 1)
590 return -EFAULT;
591 if (reset_flag == 0)
592 iwl_legacy_clear_isr_stats(priv);
593
594 return count;
595}
596
597static ssize_t
598iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
599 size_t count, loff_t *ppos)
600{
601 struct iwl_priv *priv = file->private_data;
602 struct iwl_rxon_context *ctx;
603 int pos = 0, i;
604 char buf[256 * NUM_IWL_RXON_CTX];
605 const size_t bufsz = sizeof(buf);
606
607 for_each_context(priv, ctx) {
608 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
609 ctx->ctxid);
610 for (i = 0; i < AC_NUM; i++) {
611 pos += scnprintf(buf + pos, bufsz - pos,
612 "\tcw_min\tcw_max\taifsn\ttxop\n");
613 pos += scnprintf(buf + pos, bufsz - pos,
614 "AC[%d]\t%u\t%u\t%u\t%u\n", i,
615 ctx->qos_data.def_qos_parm.ac[i].cw_min,
616 ctx->qos_data.def_qos_parm.ac[i].cw_max,
617 ctx->qos_data.def_qos_parm.ac[i].aifsn,
618 ctx->qos_data.def_qos_parm.ac[i].edca_txop);
619 }
620 pos += scnprintf(buf + pos, bufsz - pos, "\n");
621 }
622 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
623}
624
625static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
626 const char __user *user_buf,
627 size_t count, loff_t *ppos)
628{
629 struct iwl_priv *priv = file->private_data;
630 char buf[8];
631 int buf_size;
632 int ht40;
633
634 memset(buf, 0, sizeof(buf));
635 buf_size = min(count, sizeof(buf) - 1);
636 if (copy_from_user(buf, user_buf, buf_size))
637 return -EFAULT;
638 if (sscanf(buf, "%d", &ht40) != 1)
639 return -EFAULT;
640 if (!iwl_legacy_is_any_associated(priv))
641 priv->disable_ht40 = ht40 ? true : false;
642 else {
643 IWL_ERR(priv, "Sta associated with AP - "
644 "Change to 40MHz channel support is not allowed\n");
645 return -EINVAL;
646 }
647
648 return count;
649}
650
651static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
652 char __user *user_buf,
653 size_t count, loff_t *ppos)
654{
655 struct iwl_priv *priv = file->private_data;
656 char buf[100];
657 int pos = 0;
658 const size_t bufsz = sizeof(buf);
659
660 pos += scnprintf(buf + pos, bufsz - pos,
661 "11n 40MHz Mode: %s\n",
662 priv->disable_ht40 ? "Disabled" : "Enabled");
663 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
664}
665
666DEBUGFS_READ_WRITE_FILE_OPS(sram);
667DEBUGFS_READ_FILE_OPS(nvm);
668DEBUGFS_READ_FILE_OPS(stations);
669DEBUGFS_READ_FILE_OPS(channels);
670DEBUGFS_READ_FILE_OPS(status);
671DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
672DEBUGFS_READ_FILE_OPS(qos);
673DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
674
675static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
676 char __user *user_buf,
677 size_t count, loff_t *ppos)
678{
679 struct iwl_priv *priv = file->private_data;
680 int pos = 0, ofs = 0;
681 int cnt = 0, entry;
682 struct iwl_tx_queue *txq;
683 struct iwl_queue *q;
684 struct iwl_rx_queue *rxq = &priv->rxq;
685 char *buf;
686 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
687 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
688 const u8 *ptr;
689 ssize_t ret;
690
691 if (!priv->txq) {
692 IWL_ERR(priv, "txq not ready\n");
693 return -EAGAIN;
694 }
695 buf = kzalloc(bufsz, GFP_KERNEL);
696 if (!buf) {
697 IWL_ERR(priv, "Can not allocate buffer\n");
698 return -ENOMEM;
699 }
700 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
701 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
702 txq = &priv->txq[cnt];
703 q = &txq->q;
704 pos += scnprintf(buf + pos, bufsz - pos,
705 "q[%d]: read_ptr: %u, write_ptr: %u\n",
706 cnt, q->read_ptr, q->write_ptr);
707 }
708 if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
709 ptr = priv->tx_traffic;
710 pos += scnprintf(buf + pos, bufsz - pos,
711 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
712 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
713 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
714 entry++, ofs += 16) {
715 pos += scnprintf(buf + pos, bufsz - pos,
716 "0x%.4x ", ofs);
717 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
718 buf + pos, bufsz - pos, 0);
719 pos += strlen(buf + pos);
720 if (bufsz - pos > 0)
721 buf[pos++] = '\n';
722 }
723 }
724 }
725
726 pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
727 pos += scnprintf(buf + pos, bufsz - pos,
728 "read: %u, write: %u\n",
729 rxq->read, rxq->write);
730
731 if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
732 ptr = priv->rx_traffic;
733 pos += scnprintf(buf + pos, bufsz - pos,
734 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
735 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
736 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
737 entry++, ofs += 16) {
738 pos += scnprintf(buf + pos, bufsz - pos,
739 "0x%.4x ", ofs);
740 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
741 buf + pos, bufsz - pos, 0);
742 pos += strlen(buf + pos);
743 if (bufsz - pos > 0)
744 buf[pos++] = '\n';
745 }
746 }
747 }
748
749 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
750 kfree(buf);
751 return ret;
752}
753
754static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
755 const char __user *user_buf,
756 size_t count, loff_t *ppos)
757{
758 struct iwl_priv *priv = file->private_data;
759 char buf[8];
760 int buf_size;
761 int traffic_log;
762
763 memset(buf, 0, sizeof(buf));
764 buf_size = min(count, sizeof(buf) - 1);
765 if (copy_from_user(buf, user_buf, buf_size))
766 return -EFAULT;
767 if (sscanf(buf, "%d", &traffic_log) != 1)
768 return -EFAULT;
769 if (traffic_log == 0)
770 iwl_legacy_reset_traffic_log(priv);
771
772 return count;
773}
774
775static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
776 char __user *user_buf,
777 size_t count, loff_t *ppos) {
778
779 struct iwl_priv *priv = file->private_data;
780 struct iwl_tx_queue *txq;
781 struct iwl_queue *q;
782 char *buf;
783 int pos = 0;
784 int cnt;
785 int ret;
786 const size_t bufsz = sizeof(char) * 64 *
787 priv->cfg->base_params->num_of_queues;
788
789 if (!priv->txq) {
790 IWL_ERR(priv, "txq not ready\n");
791 return -EAGAIN;
792 }
793 buf = kzalloc(bufsz, GFP_KERNEL);
794 if (!buf)
795 return -ENOMEM;
796
797 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
798 txq = &priv->txq[cnt];
799 q = &txq->q;
800 pos += scnprintf(buf + pos, bufsz - pos,
801 "hwq %.2d: read=%u write=%u stop=%d"
802 " swq_id=%#.2x (ac %d/hwq %d)\n",
803 cnt, q->read_ptr, q->write_ptr,
804 !!test_bit(cnt, priv->queue_stopped),
805 txq->swq_id, txq->swq_id & 3,
806 (txq->swq_id >> 2) & 0x1f);
807 if (cnt >= 4)
808 continue;
809 /* for the ACs, display the stop count too */
810 pos += scnprintf(buf + pos, bufsz - pos,
811 " stop-count: %d\n",
812 atomic_read(&priv->queue_stop_count[cnt]));
813 }
814 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
815 kfree(buf);
816 return ret;
817}
818
819static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
820 char __user *user_buf,
821 size_t count, loff_t *ppos) {
822
823 struct iwl_priv *priv = file->private_data;
824 struct iwl_rx_queue *rxq = &priv->rxq;
825 char buf[256];
826 int pos = 0;
827 const size_t bufsz = sizeof(buf);
828
829 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
830 rxq->read);
831 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
832 rxq->write);
833 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
834 rxq->free_count);
835 if (rxq->rb_stts) {
836 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
837 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
838 } else {
839 pos += scnprintf(buf + pos, bufsz - pos,
840 "closed_rb_num: Not Allocated\n");
841 }
842 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
843}
844
845static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
846 char __user *user_buf,
847 size_t count, loff_t *ppos)
848{
849 struct iwl_priv *priv = file->private_data;
850 return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
851 user_buf, count, ppos);
852}
853
854static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
855 char __user *user_buf,
856 size_t count, loff_t *ppos)
857{
858 struct iwl_priv *priv = file->private_data;
859 return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
860 user_buf, count, ppos);
861}
862
863static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
864 char __user *user_buf,
865 size_t count, loff_t *ppos)
866{
867 struct iwl_priv *priv = file->private_data;
868 return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
869 user_buf, count, ppos);
870}
871
872static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
873 char __user *user_buf,
874 size_t count, loff_t *ppos) {
875
876 struct iwl_priv *priv = file->private_data;
877 int pos = 0;
878 int cnt = 0;
879 char *buf;
880 int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
881 ssize_t ret;
882 struct iwl_sensitivity_data *data;
883
884 data = &priv->sensitivity_data;
885 buf = kzalloc(bufsz, GFP_KERNEL);
886 if (!buf) {
887 IWL_ERR(priv, "Can not allocate Buffer\n");
888 return -ENOMEM;
889 }
890
891 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
892 data->auto_corr_ofdm);
893 pos += scnprintf(buf + pos, bufsz - pos,
894 "auto_corr_ofdm_mrc:\t\t %u\n",
895 data->auto_corr_ofdm_mrc);
896 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
897 data->auto_corr_ofdm_x1);
898 pos += scnprintf(buf + pos, bufsz - pos,
899 "auto_corr_ofdm_mrc_x1:\t\t %u\n",
900 data->auto_corr_ofdm_mrc_x1);
901 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
902 data->auto_corr_cck);
903 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
904 data->auto_corr_cck_mrc);
905 pos += scnprintf(buf + pos, bufsz - pos,
906 "last_bad_plcp_cnt_ofdm:\t\t %u\n",
907 data->last_bad_plcp_cnt_ofdm);
908 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
909 data->last_fa_cnt_ofdm);
910 pos += scnprintf(buf + pos, bufsz - pos,
911 "last_bad_plcp_cnt_cck:\t\t %u\n",
912 data->last_bad_plcp_cnt_cck);
913 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
914 data->last_fa_cnt_cck);
915 pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
916 data->nrg_curr_state);
917 pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
918 data->nrg_prev_state);
919 pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
920 for (cnt = 0; cnt < 10; cnt++) {
921 pos += scnprintf(buf + pos, bufsz - pos, " %u",
922 data->nrg_value[cnt]);
923 }
924 pos += scnprintf(buf + pos, bufsz - pos, "\n");
925 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
926 for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
927 pos += scnprintf(buf + pos, bufsz - pos, " %u",
928 data->nrg_silence_rssi[cnt]);
929 }
930 pos += scnprintf(buf + pos, bufsz - pos, "\n");
931 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
932 data->nrg_silence_ref);
933 pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
934 data->nrg_energy_idx);
935 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
936 data->nrg_silence_idx);
937 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
938 data->nrg_th_cck);
939 pos += scnprintf(buf + pos, bufsz - pos,
940 "nrg_auto_corr_silence_diff:\t %u\n",
941 data->nrg_auto_corr_silence_diff);
942 pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
943 data->num_in_cck_no_fa);
944 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
945 data->nrg_th_ofdm);
946
947 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
948 kfree(buf);
949 return ret;
950}
951
952
/*
 * Dump the driver's chain-noise calibration state to userspace.
 *
 * Emits one "name: value" line per scalar field of priv->chain_noise_data,
 * then the per-chain disconnect and delta-gain arrays (NUM_RX_CHAINS
 * entries each).  Buffer is heap-allocated and sized generously relative
 * to the struct; scnprintf() bounds every write.
 */
static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	/* rough worst-case text size: 4x the binary struct plus slack */
	int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
	ssize_t ret;
	struct iwl_chain_noise_data *data;

	data = &priv->chain_noise_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
			data->active_chains);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
			data->chain_noise_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
			data->chain_noise_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
			data->chain_noise_c);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
			data->chain_signal_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
			data->chain_signal_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
			data->chain_signal_c);
	pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
			data->beacon_count);

	/* per-chain arrays, printed on a single line each */
	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->disconn_array[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->delta_gain_code[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
			data->radio_write);
	pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
			data->state);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1010
1011static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
1012 char __user *user_buf,
1013 size_t count, loff_t *ppos)
1014{
1015 struct iwl_priv *priv = file->private_data;
1016 char buf[60];
1017 int pos = 0;
1018 const size_t bufsz = sizeof(buf);
1019 u32 pwrsave_status;
1020
1021 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
1022 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1023
1024 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1025 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1026 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1027 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1028 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1029 "error");
1030
1031 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1032}
1033
1034static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
1035 const char __user *user_buf,
1036 size_t count, loff_t *ppos)
1037{
1038 struct iwl_priv *priv = file->private_data;
1039 char buf[8];
1040 int buf_size;
1041 int clear;
1042
1043 memset(buf, 0, sizeof(buf));
1044 buf_size = min(count, sizeof(buf) - 1);
1045 if (copy_from_user(buf, user_buf, buf_size))
1046 return -EFAULT;
1047 if (sscanf(buf, "%d", &clear) != 1)
1048 return -EFAULT;
1049
1050 /* make request to uCode to retrieve statistics information */
1051 mutex_lock(&priv->mutex);
1052 iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
1053 mutex_unlock(&priv->mutex);
1054
1055 return count;
1056}
1057
1058static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
1059 char __user *user_buf,
1060 size_t count, loff_t *ppos) {
1061
1062 struct iwl_priv *priv = file->private_data;
1063 int len = 0;
1064 char buf[20];
1065
1066 len = sprintf(buf, "0x%04X\n",
1067 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
1068 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1069}
1070
1071static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
1072 char __user *user_buf,
1073 size_t count, loff_t *ppos) {
1074
1075 struct iwl_priv *priv = file->private_data;
1076 int len = 0;
1077 char buf[20];
1078
1079 len = sprintf(buf, "0x%04X\n",
1080 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
1081 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1082}
1083
1084static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
1085 char __user *user_buf,
1086 size_t count, loff_t *ppos)
1087{
1088 struct iwl_priv *priv = file->private_data;
1089 char *buf;
1090 int pos = 0;
1091 ssize_t ret = -EFAULT;
1092
1093 if (priv->cfg->ops->lib->dump_fh) {
1094 ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
1095 if (buf) {
1096 ret = simple_read_from_buffer(user_buf,
1097 count, ppos, buf, pos);
1098 kfree(buf);
1099 }
1100 }
1101
1102 return ret;
1103}
1104
1105static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
1106 char __user *user_buf,
1107 size_t count, loff_t *ppos) {
1108
1109 struct iwl_priv *priv = file->private_data;
1110 int pos = 0;
1111 char buf[12];
1112 const size_t bufsz = sizeof(buf);
1113
1114 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
1115 priv->missed_beacon_threshold);
1116
1117 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1118}
1119
1120static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
1121 const char __user *user_buf,
1122 size_t count, loff_t *ppos)
1123{
1124 struct iwl_priv *priv = file->private_data;
1125 char buf[8];
1126 int buf_size;
1127 int missed;
1128
1129 memset(buf, 0, sizeof(buf));
1130 buf_size = min(count, sizeof(buf) - 1);
1131 if (copy_from_user(buf, user_buf, buf_size))
1132 return -EFAULT;
1133 if (sscanf(buf, "%d", &missed) != 1)
1134 return -EINVAL;
1135
1136 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
1137 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
1138 priv->missed_beacon_threshold =
1139 IWL_MISSED_BEACON_THRESHOLD_DEF;
1140 else
1141 priv->missed_beacon_threshold = missed;
1142
1143 return count;
1144}
1145
/*
 * Report force-reset bookkeeping kept in priv->force_reset: request,
 * success and reject counts plus the configured reset duration
 * (units not visible here -- presumably jiffies; confirm at the struct).
 */
static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	char buf[300];
	const size_t bufsz = sizeof(buf);
	struct iwl_force_reset *force_reset;

	force_reset = &priv->force_reset;

	pos += scnprintf(buf + pos, bufsz - pos,
			"\tnumber of reset request: %d\n",
			force_reset->reset_request_count);
	pos += scnprintf(buf + pos, bufsz - pos,
			"\tnumber of reset request success: %d\n",
			force_reset->reset_success_count);
	pos += scnprintf(buf + pos, bufsz - pos,
			"\tnumber of reset request reject: %d\n",
			force_reset->reset_reject_count);
	pos += scnprintf(buf + pos, bufsz - pos,
			"\treset duration: %lu\n",
			force_reset->reset_duration);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
1173
1174static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
1175 const char __user *user_buf,
1176 size_t count, loff_t *ppos) {
1177
1178 int ret;
1179 struct iwl_priv *priv = file->private_data;
1180
1181 ret = iwl_legacy_force_reset(priv, true);
1182
1183 return ret ? ret : count;
1184}
1185
/*
 * Set the Tx-queue watchdog timeout.  Values outside
 * [0, IWL_MAX_WD_TIMEOUT] silently fall back to IWL_DEF_WD_TIMEOUT;
 * iwl_legacy_setup_watchdog() then applies the new value.
 */
static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char buf[8];
	int buf_size;
	int timeout;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &timeout) != 1)
		return -EINVAL;
	if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
		timeout = IWL_DEF_WD_TIMEOUT;

	priv->cfg->base_params->wd_timeout = timeout;
	iwl_legacy_setup_watchdog(priv);
	return count;
}
1208
/*
 * Instantiate the struct file_operations for each handler above; the
 * DEBUGFS_{READ,WRITE,READ_WRITE}_FILE_OPS macros (defined earlier in
 * this file) wire up the matching _read/_write functions.
 */
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
DEBUGFS_READ_FILE_OPS(sensitivity);
DEBUGFS_READ_FILE_OPS(chain_noise);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
DEBUGFS_READ_FILE_OPS(rxon_flags);
DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1228
1229/*
1230 * Create the debugfs files and directories
1231 *
1232 */
1233int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
1234{
1235 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
1236 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1237
1238 dir_drv = debugfs_create_dir(name, phyd);
1239 if (!dir_drv)
1240 return -ENOMEM;
1241
1242 priv->debugfs_dir = dir_drv;
1243
1244 dir_data = debugfs_create_dir("data", dir_drv);
1245 if (!dir_data)
1246 goto err;
1247 dir_rf = debugfs_create_dir("rf", dir_drv);
1248 if (!dir_rf)
1249 goto err;
1250 dir_debug = debugfs_create_dir("debug", dir_drv);
1251 if (!dir_debug)
1252 goto err;
1253
1254 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1255 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1256 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1257 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1258 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1259 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1260 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1261 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1262 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
1263 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
1264 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
1265 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1266 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1267 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1268 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
1269 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
1270 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
1271 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
1272 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1273 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1274 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1275 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1276
1277 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1278 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1279 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1280 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1281 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1282 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1283 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1284 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1285 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1286 &priv->disable_sens_cal);
1287 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1288 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1289 &priv->disable_chain_noise_cal);
1290 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
1291 &priv->disable_tx_power_cal);
1292 return 0;
1293
1294err:
1295 IWL_ERR(priv, "Can't create the debugfs directory\n");
1296 iwl_legacy_dbgfs_unregister(priv);
1297 return -ENOMEM;
1298}
1299EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
1300
/**
 * iwl_legacy_dbgfs_unregister - remove the debugfs files and directories
 * @priv: driver private data
 *
 * Safe to call when registration failed or never happened: a NULL
 * debugfs_dir is simply ignored.
 */
void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
{
	if (!priv->debugfs_dir)
		return;

	debugfs_remove_recursive(priv->debugfs_dir);
	priv->debugfs_dir = NULL;
}
EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
deleted file mode 100644
index 9c786edf56fd..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ /dev/null
@@ -1,1364 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_legacy_dev_h__
33#define __iwl_legacy_dev_h__
34
35#include <linux/interrupt.h>
36#include <linux/pci.h> /* for struct pci_device_id */
37#include <linux/kernel.h>
38#include <linux/leds.h>
39#include <linux/wait.h>
40#include <net/ieee80211_radiotap.h>
41
42#include "iwl-eeprom.h"
43#include "iwl-csr.h"
44#include "iwl-prph.h"
45#include "iwl-fh.h"
46#include "iwl-debug.h"
47#include "iwl-4965-hw.h"
48#include "iwl-3945-hw.h"
49#include "iwl-led.h"
50#include "iwl-power.h"
51#include "iwl-legacy-rs.h"
52
struct iwl_tx_queue;

/* CT-KILL constants */
#define CT_KILL_THRESHOLD_LEGACY	110 /* in Celsius */

/* Default noise level to report when noise measurement is not available.
 *   This may be because we're:
 *   1)  Not associated (4965, no beacon statistics being sent to driver)
 *   2)  Scanning (noise measurement does not apply to associated channel)
 *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
 * Use default noise value of -127 ... this is below the range of measurable
 *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
 *   Also, -127 works better than 0 when averaging frames with/without
 *   noise info (e.g. averaging might be done in app); measured dBm values are
 *   always negative ... using a negative value as the default keeps all
 *   averages within an s8's (used in some apps) range of negative values. */
#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)

/*
 * RTS threshold here is total size [2347] minus 4 FCS bytes
 * Per spec:
 *   a value of 0 means RTS on all data/management packets
 *   a value > max MSDU size means no RTS
 *   else RTS for data/management frames where MPDU is larger
 *     than RTS value.
 */
#define DEFAULT_RTS_THRESHOLD     2347U
#define MIN_RTS_THRESHOLD         0U
#define MAX_RTS_THRESHOLD         2347U
#define MAX_MSDU_SIZE		  2304U
#define MAX_MPDU_SIZE		  2346U
#define DEFAULT_BEACON_INTERVAL   100U
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT  4U

/* One Rx buffer: a DMA-mapped page plus its free/used list linkage. */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;	/* bus address of the mapped page */
	struct page *page;	/* backing page for the Rx buffer */
	struct list_head list;	/* entry in rx_free / rx_used (iwl_rx_queue) */
};
93
94#define rxb_addr(r) page_address(r->page)
95
/* defined below */
struct iwl_device_cmd;

/* Per-command driver metadata kept alongside each queued host command. */
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	/*
	 * only for ASYNC commands
	 * (which is somewhat stupid -- look at iwl-sta.c for instance
	 * which duplicates a bunch of code because the callback isn't
	 * invoked for SYNC commands, if it were and its result passed
	 * through it would be simpler...)
	 */
	void (*callback)(struct iwl_priv *priv,
			 struct iwl_device_cmd *cmd,
			 struct iwl_rx_packet *pkt);

	/* The CMD_SIZE_HUGE flag bit indicates that the command
	 * structure is stored at the end of the shared queue memory. */
	u32 flags;

	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues
 */
struct iwl_queue {
	int n_bd;	      /* number of BDs in this queue */
	int write_ptr;	      /* 1-st empty entry (index) host_w*/
	int read_ptr;	      /* last used entry (index) host_r*/
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;  /* physical addr for BD's */
	int n_window;	      /* safe queue window */
	u32 id;
	int low_mark;	      /* low watermark, resume queue if free
			       * space more than this */
	int high_mark;	      /* high watermark, stop queue if free
			       * space less than this */
};

/* One for each TFD: the frame being sent and the RXON context it uses. */
struct iwl_tx_info {
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
};
145
/**
 * struct iwl_tx_queue - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: base of circular buffer of TFDs
 * @cmd: array of command/TX buffer pointers
 * @meta: array of meta data for each command/tx buffer
 * @txb: array of per-TFD driver data
 * @time_stamp: time (in jiffies) of last read_ptr change
 * @need_update: indicates need to update read/write index
 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
 * @active: queue in-use flag (NOTE(review): inferred from name -- confirm)
 * @swq_id: software queue id
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */

/* slot counts: data Tx queues vs. the (smaller) command queue */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

struct iwl_tx_queue {
	struct iwl_queue q;
	void *tfds;
	struct iwl_device_cmd **cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_info *txb;
	unsigned long time_stamp;
	u8 need_update;
	u8 sched_retry;
	u8 active;
	u8 swq_id;
};
176
#define IWL_NUM_SCAN_RATES         (2)

/* per-channel regulatory data from EEPROM (4965) */
struct iwl4965_channel_tgd_info {
	u8 type;
	s8 max_power;
};

/* per-channel radar history (4965) */
struct iwl4965_channel_tgh_info {
	s64 last_radar_time;
};

#define IWL4965_MAX_RATE (33)

struct iwl3945_clip_group {
	/* maximum power level to prevent clipping for each rate, derived by
	 *   us from this band's saturation power in EEPROM */
	const s8 clip_powers[IWL_MAX_RATES];
};

/* current Tx power values to use, one for each rate for each channel.
 * requested power is limited by:
 * -- regulatory EEPROM limits for this channel
 * -- hardware capabilities (clip-powers)
 * -- spectrum management
 * -- user preference (e.g. iwconfig)
 * when requested power is set, base power index must also be set. */
struct iwl3945_channel_power_info {
	struct iwl3945_tx_power tpc;	/* actual radio and DSP gain settings */
	s8 power_table_index;	/* actual (compenst'd) index into gain table */
	s8 base_power_index;	/* gain index for power at factory temp. */
	s8 requested_power;	/* power (dBm) requested for this chnl/rate */
};

/* current scan Tx power values to use, one for each scan rate for each
 * channel. */
struct iwl3945_scan_power_info {
	struct iwl3945_tx_power tpc;	/* actual radio and DSP gain settings */
	s8 power_table_index;	/* actual (compenst'd) index into gain table */
	s8 requested_power;	/* scan pwr (dBm) requested for chnl/rate */
};

/*
 * One for each channel, holds all channel setup data
 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
 *     with one another!
 */
struct iwl_channel_info {
	struct iwl4965_channel_tgd_info tgd;
	struct iwl4965_channel_tgh_info tgh;
	struct iwl_eeprom_channel eeprom;	/* EEPROM regulatory limit */
	struct iwl_eeprom_channel ht40_eeprom;	/* EEPROM regulatory limit for
						 * HT40 channel */

	u8 channel;	  /* channel number */
	u8 flags;	  /* flags copied from EEPROM */
	s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
	s8 curr_txpow;	  /* (dBm) regulatory/spectrum/user (not h/w) limit */
	s8 min_power;	  /* always 0 */
	s8 scan_power;	  /* (dBm) regul. eeprom, direct scans, any rate */

	u8 group_index;	  /* 0-4, maps channel to group1/2/3/4/5 */
	u8 band_index;	  /* 0-4, maps channel to band1/2/3/4/5 */
	enum ieee80211_band band;

	/* HT40 channel info */
	s8 ht40_max_power_avg;	/* (dBm) regul. eeprom, normal Tx, any rate */
	u8 ht40_flags;		/* flags copied from EEPROM */
	u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */

	/* Radio/DSP gain settings for each "normal" data Tx rate.
	 * These include, in addition to RF and DSP gain, a few fields for
	 *   remembering/modifying gain settings (indexes). */
	struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];

	/* Radio/DSP gain settings for each scan rate, for directed scans. */
	struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
};

/* Tx FIFO numbers for the BK/BE/VI/VO queues */
#define IWL_TX_FIFO_BK		0	/* shared */
#define IWL_TX_FIFO_BE		1
#define IWL_TX_FIFO_VI		2	/* shared */
#define IWL_TX_FIFO_VO		3
#define IWL_TX_FIFO_UNUSED	-1

/* Minimum number of queues. MAX_NUM is defined in hw specific files.
 * Set the minimum to accommodate the 4 standard TX queues, 1 command
 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
#define IWL_MIN_NUM_QUEUES	10

#define IWL_DEFAULT_CMD_QUEUE_NUM	4

/* 802.11 frame-size helpers used for the iwl_frame raw buffer below */
#define IEEE80211_DATA_LEN		2304
#define IEEE80211_4ADDR_LEN		30
#define IEEE80211_HLEN			(IEEE80211_4ADDR_LEN)
#define IEEE80211_FRAME_LEN		(IEEE80211_DATA_LEN + IEEE80211_HLEN)
271#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
272
/* Buffer big enough for one full 802.11 frame, a beacon command, or a
 * raw payload, kept on a free list via @list. */
struct iwl_frame {
	union {
		struct ieee80211_hdr frame;
		struct iwl_tx_beacon_cmd beacon;
		u8 raw[IEEE80211_FRAME_LEN];
		u8 cmd[360];
	} u;
	struct list_head list;
};

/* Convert between the IEEE 802.11 sequence-control field and the plain
 * 12-bit sequence number it carries in its upper bits. */
#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)

/* Host-command submission flags (see iwl_host_cmd / iwl_cmd_meta .flags) */
enum {
	CMD_SYNC = 0,
	CMD_SIZE_NORMAL = 0,
	CMD_NO_SKB = 0,
	CMD_SIZE_HUGE = (1 << 0),
	CMD_ASYNC = (1 << 1),
	CMD_WANT_SKB = (1 << 2),
	CMD_MAPPED = (1 << 3),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for a scan command
 * (which is relatively huge; space is allocated separately).
 */
struct iwl_device_cmd {
	struct iwl_cmd_header hdr;	/* uCode API */
	union {
		u32 flags;
		u8 val8;
		u16 val16;
		u32 val32;
		struct iwl_tx_cmd tx;
		u8 payload[DEF_CMD_PAYLOAD_SIZE];
	} __packed cmd;
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))


/* A command as submitted by driver code; @callback fires for ASYNC
 * commands only (see iwl_cmd_meta). */
struct iwl_host_cmd {
	const void *data;
	unsigned long reply_page;
	void (*callback)(struct iwl_priv *priv,
			 struct iwl_device_cmd *cmd,
			 struct iwl_rx_packet *pkt);
	u32 flags;
	u16 len;
	u8 id;
};
331
/* max channel counts for the supported-rates tables, per band/modulation */
#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
#define SUP_RATE_11G_MAX_NUM_CHANNELS  12

/**
 * struct iwl_rx_queue - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: backing store of iwl_rx_mem_buffer bookkeeping entries
 * @queue: per-slot pointers into @pool
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: NOTE(review): appears to track the write index last
 *	pushed to hardware -- confirm against the restock code
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: spinlock (usage not visible in this header)
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rx_queue {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};
367
#define IWL_SUPPORTED_RATES_IE_LEN         8

#define MAX_TID_COUNT        9

#define IWL_INVALID_RATE     0xFF
#define IWL_INVALID_VALUE    -1

/**
 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
 * @txq_id: Tx queue used for Tx attempt
 * @frame_count: # frames attempted by Tx command
 * @wait_for_ba: Expect block-ack before next Tx reply
 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
 * @bitmap: one bit for each frame pending ACK in the Tx window
 * @rate_n_flags: Rate at which Tx was attempted
 * @state: one of the IWL_AGG_* / IWL_EMPTYING_* values defined below
 *
 * If REPLY_TX indicates that aggregation was attempted, driver must wait
 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
 * until block ack arrives.
 */
struct iwl_ht_agg {
	u16 txq_id;
	u16 frame_count;
	u16 wait_for_ba;
	u16 start_idx;
	u64 bitmap;
	u32 rate_n_flags;
#define IWL_AGG_OFF 0
#define IWL_AGG_ON 1
#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
#define IWL_EMPTYING_HW_QUEUE_DELBA 3
	u8 state;
};
402
403
/* Per-TID Tx state for one station. */
struct iwl_tid_data {
	u16 seq_number; /* 4965 only */
	u16 tfds_in_queue;
	struct iwl_ht_agg agg;
};

/* One security key: cipher type, key index and raw key material. */
struct iwl_hw_key {
	u32 cipher;
	int keylen;
	u8 keyidx;
	u8 key[32];
};

/* HT rate support as one u16 or split into SISO/MIMO rate bytes. */
union iwl_ht_rate_supp {
	u16 rates;
	struct {
		u8 siso_rate;
		u8 mimo_rate;
	};
};

/* Rx A-MPDU maximum-length ("factor") codes */
#define CFG_HT_RX_AMPDU_FACTOR_8K   (0x0)
#define CFG_HT_RX_AMPDU_FACTOR_16K  (0x1)
#define CFG_HT_RX_AMPDU_FACTOR_32K  (0x2)
#define CFG_HT_RX_AMPDU_FACTOR_64K  (0x3)
#define CFG_HT_RX_AMPDU_FACTOR_DEF  CFG_HT_RX_AMPDU_FACTOR_64K
#define CFG_HT_RX_AMPDU_FACTOR_MAX  CFG_HT_RX_AMPDU_FACTOR_64K
#define CFG_HT_RX_AMPDU_FACTOR_MIN  CFG_HT_RX_AMPDU_FACTOR_8K

/*
 * Maximal MPDU density for TX aggregation
 * 4 - 2us density
 * 5 - 4us density
 * 6 - 8us density
 * 7 - 16us density
 */
#define CFG_HT_MPDU_DENSITY_2USEC   (0x4)
#define CFG_HT_MPDU_DENSITY_4USEC   (0x5)
#define CFG_HT_MPDU_DENSITY_8USEC   (0x6)
#define CFG_HT_MPDU_DENSITY_16USEC  (0x7)
#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
#define CFG_HT_MPDU_DENSITY_MIN     (0x1)

struct iwl_ht_config {
	bool single_chain_sufficient;
	enum ieee80211_smps_mode smps; /* current smps mode */
};

/* QoS structures */
struct iwl_qos_info {
	int qos_active;
	struct iwl_qosparam_cmd def_qos_parm;
};

/*
 * Structure should be accessed with sta_lock held. When station addition
 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
 * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
 * sta_lock held.
 */
struct iwl_station_entry {
	struct iwl_legacy_addsta_cmd sta;
	struct iwl_tid_data tid[MAX_TID_COUNT];
	u8 used, ctxid;
	struct iwl_hw_key keyinfo;
	struct iwl_link_quality_cmd *lq;
};

/* Station data shared between 3945 and 4965 (see iwl_station_priv). */
struct iwl_station_priv_common {
	struct iwl_rxon_context *ctx;
	u8 sta_id;
};
477
/*
 * iwl_station_priv: Driver's private station information
 *
 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by driver. This structure is placed in that
 * space.
 *
 * The common struct MUST be first because it is shared between
 * 3945 and 4965!
 */
struct iwl_station_priv {
	struct iwl_station_priv_common common;
	struct iwl_lq_sta lq_sta;
	atomic_t pending_frames;
	bool client;
	bool asleep;
};

/**
 * struct iwl_vif_priv - driver's private per-interface information
 *
 * When mac80211 allocates a virtual interface, it can allocate
 * space for us to put data into.
 */
struct iwl_vif_priv {
	struct iwl_rxon_context *ctx;
	u8 ibss_bssid_sta_id;
};

/* one for each uCode image (inst/data, boot/init/runtime) */
struct fw_desc {
	void *v_addr;		/* access by driver */
	dma_addr_t p_addr;	/* access by card's busmaster DMA */
	u32 len;		/* bytes */
};

/* uCode file layout */
struct iwl_ucode_header {
	__le32 ver;	/* major/minor/API/serial */
	struct {
		__le32 inst_size;	/* bytes of runtime code */
		__le32 data_size;	/* bytes of runtime data */
		__le32 init_size;	/* bytes of init code */
		__le32 init_data_size;	/* bytes of init data */
		__le32 boot_size;	/* bytes of bootstrap code */
		u8 data[0];		/* in same order as sizes */
	} v1;
};

/* Per-sender IBSS fragment-reassembly bookkeeping (4965). */
struct iwl4965_ibss_seq {
	u8 mac[ETH_ALEN];
	u16 seq_num;
	u16 frag_num;
	unsigned long packet_time;
	struct list_head list;
};
534
/* Per-hardware value ranges consumed by sensitivity calibration
 * (referenced from iwl_hw_params.sens). */
struct iwl_sensitivity_ranges {
	u16 min_nrg_cck;
	u16 max_nrg_cck;

	u16 nrg_th_cck;
	u16 nrg_th_ofdm;

	u16 auto_corr_min_ofdm;
	u16 auto_corr_min_ofdm_mrc;
	u16 auto_corr_min_ofdm_x1;
	u16 auto_corr_min_ofdm_mrc_x1;

	u16 auto_corr_max_ofdm;
	u16 auto_corr_max_ofdm_mrc;
	u16 auto_corr_max_ofdm_x1;
	u16 auto_corr_max_ofdm_mrc_x1;

	u16 auto_corr_max_cck;
	u16 auto_corr_max_cck_mrc;
	u16 auto_corr_min_cck;
	u16 auto_corr_min_cck_mrc;

	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};


/* Integer Kelvin <-> Celsius conversions (fixed 273 offset). */
#define KELVIN_TO_CELSIUS(x) ((x)-273)
#define CELSIUS_TO_KELVIN(x) ((x)+273)
565
566
/**
 * struct iwl_hw_params
 * @max_txq_num: Max # Tx queues supported
 * @dma_chnl_num: Number of Tx DMA/FIFO channels
 * @scd_bc_tbls_size: size of scheduler byte count tables
 * @tfd_size: TFD size
 * @tx_chains_num: Number of TX chains
 * @rx_chains_num: Number of RX chains
 * @valid_tx_ant: usable TX antennas
 * @valid_rx_ant: usable RX antennas
 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
 * @max_rxq_log: Log-base-2 of max_rxq_size
 * @rx_page_order: Rx buffer page order
 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
 * @max_stations: maximum number of stations supported
 * @ht40_channel: is 40MHz width possible in band 2.4
 *	BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
 * @max_beacon_itrvl: maximum beacon interval, in units of 1024 ms
 * @max_inst_size: for ucode uses
 * @max_data_size: for ucode uses
 * @max_bsm_size: for ucode uses
 * @ct_kill_threshold: temperature threshold, in hw-dependent units
 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
 * @sens: range of sensitivity values
 */
struct iwl_hw_params {
	u8 max_txq_num;
	u8 dma_chnl_num;
	u16 scd_bc_tbls_size;
	u32 tfd_size;
	u8  tx_chains_num;
	u8  rx_chains_num;
	u8  valid_tx_ant;
	u8  valid_rx_ant;
	u16 max_rxq_size;
	u16 max_rxq_log;
	u32 rx_page_order;
	u32 rx_wrt_ptr_reg;
	u8  max_stations;
	u8  ht40_channel;
	u8  max_beacon_itrvl;	/* in 1024 ms */
	u32 max_inst_size;
	u32 max_data_size;
	u32 max_bsm_size;
	u32 ct_kill_threshold; /* value in hw-dependent units */
	u16 beacon_time_tsf_bits;
	const struct iwl_sensitivity_ranges *sens;
};
611
612
613/******************************************************************************
614 *
615 * Functions implemented in core module which are forward declared here
616 * for use by iwl-[4-5].c
617 *
618 * NOTE: The implementation of these functions are not hardware specific
619 * which is why they are in the core module files.
620 *
621 * Naming convention --
622 * iwl_ <-- Is part of iwlwifi
623 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
624 * iwl4965_bg_ <-- Called from work queue context
625 * iwl4965_mac_ <-- mac80211 callback
626 *
627 ****************************************************************************/
628extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
629extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
630extern int iwl_legacy_queue_space(const struct iwl_queue *q);
631static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
632{
633 return q->write_ptr >= q->read_ptr ?
634 (i >= q->read_ptr && i < q->write_ptr) :
635 !(i < q->read_ptr && i >= q->write_ptr);
636}
637
638
639static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
640 int is_huge)
641{
642 /*
643 * This is for init calibration result and scan command which
644 * required buffer > TFD_MAX_PAYLOAD_SIZE,
645 * the big buffer at end of command array
646 */
647 if (is_huge)
648 return q->n_window; /* must be power of 2 */
649
650 /* Otherwise, use normal size buffers */
651 return index & (q->n_window - 1);
652}
653
654
/* A DMA buffer: device-visible bus address, CPU address and length. */
struct iwl_dma_ptr {
	dma_addr_t dma;		/* bus address for the device */
	void *addr;		/* CPU virtual address */
	size_t size;		/* length in bytes */
};
660
/* HT operation modes */
#define IWL_OPERATION_MODE_AUTO     0
#define IWL_OPERATION_MODE_HT_ONLY  1
#define IWL_OPERATION_MODE_MIXED    2
#define IWL_OPERATION_MODE_20MHZ    3

/* per-frame Tx overhead, in bytes */
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000

/* Sensitivity and chain noise calibration */
#define INITIALIZATION_VALUE		0xFFFF
#define IWL4965_CAL_NUM_BEACONS		20
#define IWL_CAL_NUM_BEACONS		16
#define MAXIMUM_ALLOWED_PATHLOSS	15

#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3

/* false-alarm count bounds per calibration period */
#define MAX_FA_OFDM  50
#define MIN_FA_OFDM  5
#define MAX_FA_CCK   50
#define MIN_FA_CCK   5

#define AUTO_CORR_STEP_OFDM       1

#define AUTO_CORR_STEP_CCK     3
#define AUTO_CORR_MAX_TH_CCK   160

#define NRG_DIFF               2
#define NRG_STEP_CCK           2
#define NRG_MARGIN             8
#define MAX_NUMBER_CCK_NO_FA 100

#define AUTO_CORR_CCK_MIN_VAL_DEF    (125)

/* Rx chain identifiers */
#define CHAIN_A             0
#define CHAIN_B             1
#define CHAIN_C             2
#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
#define ALL_BAND_FILTER			0xFF00
#define IN_BAND_FILTER			0xFF
#define MIN_AVERAGE_NOISE_MAX_VALUE	0xFFFFFFFF

#define NRG_NUM_PREV_STAT_L     20
#define NUM_RX_CHAINS           3
706
/* Outcome of comparing the false-alarm count against its min/max bounds. */
enum iwl4965_false_alarm_state {
	IWL_FA_TOO_MANY = 0,
	IWL_FA_TOO_FEW = 1,
	IWL_FA_GOOD_RANGE = 2,
};
712
/* Progress of the chain-noise (differential Rx gain) calibration. */
enum iwl4965_chain_noise_state {
	IWL_CHAIN_NOISE_ALIVE = 0,  /* must be 0 */
	IWL_CHAIN_NOISE_ACCUMULATE,
	IWL_CHAIN_NOISE_CALIBRATED,
	IWL_CHAIN_NOISE_DONE,
};
719
/* Whether a runtime calibration is enabled at all. */
enum iwl4965_calib_enabled_state {
	IWL_CALIB_DISABLED = 0,  /* must be 0 */
	IWL_CALIB_ENABLED = 1,
};
724
/*
 * enum iwl_calib
 * defines the order in which results of initial calibrations
 * should be sent to the runtime uCode
 *
 * NOTE: with only the _MAX sentinel defined, arrays sized by
 * IWL_CALIB_MAX (e.g. iwl_priv.calib_results) have zero entries.
 */
enum iwl_calib {
	IWL_CALIB_MAX,
};
733
/* Opaque calibration results (raw buffer forwarded to the runtime uCode) */
struct iwl_calib_result {
	void *buf;	/* heap buffer holding the result; ownership here */
	size_t buf_len;	/* length of buf in bytes */
};
739
/* Which uCode image is currently loaded (see iwl_priv.ucode_type). */
enum ucode_type {
	UCODE_NONE = 0,
	UCODE_INIT,	/* initialization image */
	UCODE_RT	/* runtime image */
};
745
/* Sensitivity calib data */
struct iwl_sensitivity_data {
	/* auto-correlation thresholds, OFDM and CCK, with/without MRC */
	u32 auto_corr_ofdm;
	u32 auto_corr_ofdm_mrc;
	u32 auto_corr_ofdm_x1;
	u32 auto_corr_ofdm_mrc_x1;
	u32 auto_corr_cck;
	u32 auto_corr_cck_mrc;

	/* counters remembered from the previous statistics period —
	 * presumably bad-PLCP and false-alarm counts; confirm in calib code */
	u32 last_bad_plcp_cnt_ofdm;
	u32 last_fa_cnt_ofdm;
	u32 last_bad_plcp_cnt_cck;
	u32 last_fa_cnt_cck;

	/* energy (nrg) detection state */
	u32 nrg_curr_state;
	u32 nrg_prev_state;
	u32 nrg_value[10];
	u8  nrg_silence_rssi[NRG_NUM_PREV_STAT_L];	/* RSSI history ring */
	u32 nrg_silence_ref;
	u32 nrg_energy_idx;
	u32 nrg_silence_idx;
	u32 nrg_th_cck;		/* CCK energy threshold */
	s32 nrg_auto_corr_silence_diff;
	u32 num_in_cck_no_fa;
	u32 nrg_th_ofdm;	/* OFDM energy threshold */

	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};
776
/* Chain noise (differential Rx gain) calib data */
struct iwl_chain_noise_data {
	u32 active_chains;	/* bitmap of chains considered connected */
	/* accumulated per-chain noise and signal measurements */
	u32 chain_noise_a;
	u32 chain_noise_b;
	u32 chain_noise_c;
	u32 chain_signal_a;
	u32 chain_signal_b;
	u32 chain_signal_c;
	u16 beacon_count;	/* beacons accumulated this calibration run */
	u8 disconn_array[NUM_RX_CHAINS];	/* nonzero => chain disconnected */
	u8 delta_gain_code[NUM_RX_CHAINS];
	u8 radio_write;
	u8 state;	/* presumably enum iwl4965_chain_noise_state; confirm */
};
792
#define	EEPROM_SEM_TIMEOUT 10		/* milliseconds */
#define EEPROM_SEM_RETRY_LIMIT 1000	/* number of attempts (not time) */

/* debugfs traffic-log ring: entry count and bytes captured per entry */
#define IWL_TRAFFIC_ENTRIES	(256)
#define IWL_TRAFFIC_ENTRY_SIZE  (64)

/* presumably flags for iwl_priv.measurement_status — confirm at use sites */
enum {
	MEASUREMENT_READY = (1 << 0),
	MEASUREMENT_ACTIVE = (1 << 1),
};
803
/* interrupt statistics: one counter per interrupt cause */
struct isr_statistics {
	u32 hw;			/* hardware error interrupts */
	u32 sw;			/* software (uCode) error interrupts */
	u32 err_code;		/* last error code reported */
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 rx_handlers[REPLY_MAX];	/* per-notification handler hits */
	u32 tx;
	u32 unhandled;
};
819
/* management statistics — indices into traffic_stats.mgmt[] */
enum iwl_mgmt_stats {
	MANAGEMENT_ASSOC_REQ = 0,
	MANAGEMENT_ASSOC_RESP,
	MANAGEMENT_REASSOC_REQ,
	MANAGEMENT_REASSOC_RESP,
	MANAGEMENT_PROBE_REQ,
	MANAGEMENT_PROBE_RESP,
	MANAGEMENT_BEACON,
	MANAGEMENT_ATIM,
	MANAGEMENT_DISASSOC,
	MANAGEMENT_AUTH,
	MANAGEMENT_DEAUTH,
	MANAGEMENT_ACTION,
	MANAGEMENT_MAX,		/* array-size sentinel */
};
/* control statistics — indices into traffic_stats.ctrl[] */
enum iwl_ctrl_stats {
	CONTROL_BACK_REQ =  0,
	CONTROL_BACK,
	CONTROL_PSPOLL,
	CONTROL_RTS,
	CONTROL_CTS,
	CONTROL_ACK,
	CONTROL_CFEND,
	CONTROL_CFENDACK,
	CONTROL_MAX,		/* array-size sentinel */
};
848
/* Per-direction traffic counters (mgmt, ctrl and data); debugfs-only. */
struct traffic_stats {
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	u32 mgmt[MANAGEMENT_MAX];	/* indexed by enum iwl_mgmt_stats */
	u32 ctrl[CONTROL_MAX];		/* indexed by enum iwl_ctrl_stats */
	u32 data_cnt;			/* data frames seen */
	u64 data_bytes;			/* data payload bytes seen */
#endif
};
857
/*
 * host interrupt timeout value
 * used with setting interrupt coalescing timer
 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
 *
 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
 */
#define IWL_HOST_INT_TIMEOUT_MAX	(0xFF)
#define IWL_HOST_INT_TIMEOUT_DEF	(0x40)
#define IWL_HOST_INT_TIMEOUT_MIN	(0x0)
#define IWL_HOST_INT_CALIB_TIMEOUT_MAX	(0xFF)
#define IWL_HOST_INT_CALIB_TIMEOUT_DEF	(0x10)
#define IWL_HOST_INT_CALIB_TIMEOUT_MIN	(0x0)

/* minimum interval (jiffies) between forced firmware reloads */
#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)

/* TX queue watchdog timeouts in mSecs */
#define IWL_DEF_WD_TIMEOUT	(2000)
#define IWL_LONG_WD_TIMEOUT	(10000)
#define IWL_MAX_WD_TIMEOUT	(120000)
879
/* Bookkeeping for forced device resets. */
struct iwl_force_reset {
	int reset_request_count;	/* resets requested */
	int reset_success_count;	/* resets carried out */
	int reset_reject_count;		/* requests rejected */
	unsigned long reset_duration;	/* presumably jiffies; confirm */
	unsigned long last_force_reset_jiffies;	/* time of last forced reset */
};
887
/* extend beacon time format bit shifting */
/*
 * for _3945 devices
 * bits 31:24 - extended
 * bits 23:0 - interval
 */
#define IWL3945_EXT_BEACON_TIME_POS	24
/*
 * for _4965 devices
 * bits 31:22 - extended
 * bits 21:0 - interval
 */
#define IWL4965_EXT_BEACON_TIME_POS	22
901
/* RXON context identifiers; the legacy driver has only the BSS context.
 * NUM_IWL_RXON_CTX is the sentinel used to size iwl_priv.contexts[]. */
enum iwl_rxon_context_id {
	IWL_RXON_CTX_BSS,

	NUM_IWL_RXON_CTX
};
907
/* State for one RXON (radio-configuration) context. */
struct iwl_rxon_context {
	struct ieee80211_vif *vif;	/* mac80211 interface bound to this ctx */

	/* per-AC mappings — presumably AC index -> FIFO / Tx queue; confirm */
	const u8 *ac_to_fifo;
	const u8 *ac_to_queue;
	u8 mcast_queue;

	/*
	 * We could use the vif to indicate active, but we
	 * also need it to be active during disabling when
	 * we already removed the vif for type setting.
	 */
	bool always_active, is_active;

	bool ht_need_multiple_chains;

	enum iwl_rxon_context_id ctxid;

	u32 interface_modes, exclusive_interface_modes;
	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;

	/*
	 * We declare this const so it can only be
	 * changed via explicit cast within the
	 * routines that actually update the physical
	 * hardware.
	 */
	const struct iwl_legacy_rxon_cmd active;	/* committed to hw */
	struct iwl_legacy_rxon_cmd staging;		/* pending changes */

	struct iwl_rxon_time_cmd timing;

	struct iwl_qos_info qos_data;

	u8 bcast_sta_id, ap_sta_id;

	/* host-command IDs used for this context */
	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
	u8 qos_cmd;
	u8 wep_key_cmd;

	struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
	u8 key_mapping_keys;

	__le32 station_flags;

	/* HT operation state for this context */
	struct {
		bool non_gf_sta_present;
		u8 protection;
		bool enabled, is_40mhz;
		u8 extension_chan_offset;
	} ht;
};
960
/*
 * Main per-device driver state.  One instance per NIC; device-generation
 * specific state lives in the anonymous _3945/_4965 union near the end.
 */
struct iwl_priv {

	/* ieee device used by generic ieee processing code */
	struct ieee80211_hw *hw;
	struct ieee80211_channel *ieee_channels;
	struct ieee80211_rate *ieee_rates;
	struct iwl_cfg *cfg;

	/* temporary frame storage list */
	struct list_head free_frames;
	int frames_count;

	enum ieee80211_band band;
	int alloc_rxb_page;	/* count of outstanding Rx page allocations */

	/* dispatch table: one handler per notification/reply ID */
	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
				       struct iwl_rx_mem_buffer *rxb);

	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];

	/* spectrum measurement report caching */
	struct iwl_spectrum_notification measure_report;
	u8 measurement_status;

	/* ucode beacon time */
	u32 ucode_beacon_time;
	int missed_beacon_threshold;

	/* track IBSS manager (last beacon) status */
	u32 ibss_manager;

	/* force reset */
	struct iwl_force_reset force_reset;

	/* we allocate array of iwl_channel_info for NIC's valid channels.
	 *    Access via channel # using indirect index array */
	struct iwl_channel_info *channel_info;	/* channel info array */
	u8 channel_count;	/* # of channels */

	/* thermal calibration */
	s32 temperature;	/* degrees Kelvin */
	s32 last_temperature;

	/* init calibration results */
	struct iwl_calib_result calib_results[IWL_CALIB_MAX];

	/* Scan related variables */
	unsigned long scan_start;
	unsigned long scan_start_tsf;
	void *scan_cmd;
	enum ieee80211_band scan_band;
	struct cfg80211_scan_request *scan_request;
	struct ieee80211_vif *scan_vif;
	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
	u8 mgmt_tx_ant;

	/* spinlock */
	spinlock_t lock;	/* protect general shared data */
	spinlock_t hcmd_lock;	/* protect hcmd */
	spinlock_t reg_lock;	/* protect hw register access */
	struct mutex mutex;

	/* basic pci-network driver stuff */
	struct pci_dev *pci_dev;

	/* pci hardware address support */
	void __iomem *hw_base;
	u32  hw_rev;
	u32  hw_wa_rev;
	u8   rev_id;

	/* microcode/device supports multiple contexts */
	u8 valid_contexts;

	/* command queue number */
	u8 cmd_queue;

	/* max number of station keys */
	u8 sta_key_max_num;

	/* EEPROM MAC addresses */
	struct mac_address addresses[1];

	/* uCode images, save to reload in case of failure */
	int fw_index;			/* firmware we're trying to load */
	u32 ucode_ver;			/* version of ucode, copy of
					   iwl_ucode.ver */
	struct fw_desc ucode_code;	/* runtime inst */
	struct fw_desc ucode_data;	/* runtime data original */
	struct fw_desc ucode_data_backup;	/* runtime data save/restore */
	struct fw_desc ucode_init;	/* initialization inst */
	struct fw_desc ucode_init_data;	/* initialization data */
	struct fw_desc ucode_boot;	/* bootstrap inst */
	enum ucode_type ucode_type;
	u8 ucode_write_complete;	/* the image write is complete */
	char firmware_name[25];

	struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];

	__le16 switch_channel;

	/* 1st responses from initialize and runtime uCode images.
	 * _4965's initialize alive response contains some calibration data. */
	struct iwl_init_alive_resp card_alive_init;
	struct iwl_alive_resp card_alive;

	u16 active_rate;

	u8 start_calib;
	struct iwl_sensitivity_data sensitivity_data;
	struct iwl_chain_noise_data chain_noise_data;
	__le16 sensitivity_tbl[HD_TABLE_SIZE];

	struct iwl_ht_config current_ht_config;

	/* Rate scaling data */
	u8 retry_rate;

	wait_queue_head_t wait_command_queue;

	int activity_timer_active;

	/* Rx and Tx DMA processing queues */
	struct iwl_rx_queue rxq;
	struct iwl_tx_queue *txq;
	unsigned long txq_ctx_active_msk;	/* bit per active Tx queue */
	struct iwl_dma_ptr kw;	/* keep warm address */
	struct iwl_dma_ptr scd_bc_tbls;

	u32 scd_base_addr;	/* scheduler sram base address */

	unsigned long status;

	/* counts mgmt, ctl, and data packets */
	struct traffic_stats tx_stats;
	struct traffic_stats rx_stats;

	/* counts interrupts */
	struct isr_statistics isr_stats;

	struct iwl_power_mgr power_data;

	/* context information */
	u8 bssid[ETH_ALEN];	/* used only on 3945 but filled by core */

	/* station table variables */

	/* Note: if lock and sta_lock are needed, lock must be acquired first */
	spinlock_t sta_lock;
	int num_stations;
	struct iwl_station_entry stations[IWL_STATION_COUNT];
	unsigned long ucode_key_table;

	/* queue refcounts */
#define IWL_MAX_HW_QUEUES	32
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	/* for each AC */
	atomic_t queue_stop_count[4];

	/* Indication if ieee80211_ops->open has been called */
	u8 is_open;

	u8 mac80211_registered;

	/* eeprom -- this is in the card's little endian byte order */
	u8 *eeprom;
	struct iwl_eeprom_calib_info *calib_info;

	enum nl80211_iftype iw_mode;

	/* Last Rx'd beacon timestamp */
	u64 timestamp;

	union {
#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
		struct {
			void *shared_virt;
			dma_addr_t shared_phys;

			struct delayed_work thermal_periodic;
			struct delayed_work rfkill_poll;

			struct iwl3945_notif_statistics statistics;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
			struct iwl3945_notif_statistics accum_statistics;
			struct iwl3945_notif_statistics delta_statistics;
			struct iwl3945_notif_statistics max_delta;
#endif

			u32 sta_supp_rates;
			int last_rx_rssi;	/* From Rx packet statistics */

			/* Rx'd packet timing information */
			u32 last_beacon_time;
			u64 last_tsf;

			/*
			 * each calibration channel group in the
			 * EEPROM has a derived clip setting for
			 * each rate.
			 */
			const struct iwl3945_clip_group clip_groups[5];

		} _3945;
#endif
#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
		struct {
			struct iwl_rx_phy_res last_phy_res;
			bool last_phy_res_valid;

			struct completion firmware_loading_complete;

			/*
			 * chain noise reset and gain commands are the
			 * two extra calibration commands follows the standard
			 * phy calibration commands
			 */
			u8 phy_calib_chain_noise_reset_cmd;
			u8 phy_calib_chain_noise_gain_cmd;

			struct iwl_notif_statistics statistics;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
			struct iwl_notif_statistics accum_statistics;
			struct iwl_notif_statistics delta_statistics;
			struct iwl_notif_statistics max_delta;
#endif

		} _4965;
#endif
	};

	struct iwl_hw_params hw_params;

	u32 inta_mask;

	struct workqueue_struct *workqueue;

	struct work_struct restart;
	struct work_struct scan_completed;
	struct work_struct rx_replenish;
	struct work_struct abort_scan;

	struct iwl_rxon_context *beacon_ctx;
	struct sk_buff *beacon_skb;

	struct work_struct tx_flush;

	struct tasklet_struct irq_tasklet;

	struct delayed_work init_alive_start;
	struct delayed_work alive_start;
	struct delayed_work scan_check;

	/* TX Power */
	s8 tx_power_user_lmt;
	s8 tx_power_device_lmt;
	s8 tx_power_next;


#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	/* debugging info */
	u32 debug_level; /* per device debugging will override global
			    iwlegacy_debug_level if set */
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	/* debugfs */
	u16 tx_traffic_idx;
	u16 rx_traffic_idx;
	u8 *tx_traffic;
	u8 *rx_traffic;
	struct dentry *debugfs_dir;
	u32 dbgfs_sram_offset, dbgfs_sram_len;
	bool disable_ht40;
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */

	struct work_struct txpower_work;
	u32 disable_sens_cal;
	u32 disable_chain_noise_cal;
	u32 disable_tx_power_cal;
	struct work_struct run_time_calib_work;
	struct timer_list statistics_periodic;
	struct timer_list watchdog;
	bool hw_ready;

	struct led_classdev led;
	unsigned long blink_on, blink_off;
	bool led_registered;
}; /*iwl_priv */
1249
/* Mark Tx queue txq_id active in the context-active bitmask. */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
{
	set_bit(txq_id, &priv->txq_ctx_active_msk);
}
1254
/* Mark Tx queue txq_id inactive in the context-active bitmask. */
static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
{
	clear_bit(txq_id, &priv->txq_ctx_active_msk);
}
1259
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
 * iwl_legacy_get_debug_level: Return active debug level for device
 *
 * Using sysfs it is possible to set per device debug level. This debug
 * level will be used if set, otherwise the global debug level which can be
 * set via module parameter is used.
 */
static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
{
	if (priv->debug_level)
		return priv->debug_level;
	else
		return iwlegacy_debug_level;
}
#else
/* Debug support compiled out: only the global module-parameter level exists. */
static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
{
	return iwlegacy_debug_level;
}
#endif
1281
1282
1283static inline struct ieee80211_hdr *
1284iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
1285 int txq_id, int idx)
1286{
1287 if (priv->txq[txq_id].txb[idx].skb)
1288 return (struct ieee80211_hdr *)priv->txq[txq_id].
1289 txb[idx].skb->data;
1290 return NULL;
1291}
1292
1293static inline struct iwl_rxon_context *
1294iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1295{
1296 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1297
1298 return vif_priv->ctx;
1299}
1300
/*
 * Iterate ctx over every RXON context whose bit is set in
 * priv->valid_contexts.  Must be followed by a statement, like a
 * plain for loop (the trailing "if" supplies the filtering).
 */
#define for_each_context(priv, ctx)				\
	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
		if (priv->valid_contexts & BIT(ctx->ctxid))
1305
1306static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
1307 enum iwl_rxon_context_id ctxid)
1308{
1309 return (priv->contexts[ctxid].active.filter_flags &
1310 RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1311}
1312
/* Legacy driver has only the BSS context, so "any" reduces to BSS. */
static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
{
	return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
}
1317
1318static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
1319{
1320 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1321}
1322
1323static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
1324{
1325 if (ch_info == NULL)
1326 return 0;
1327 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1328}
1329
1330static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
1331{
1332 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1333}
1334
/* Return nonzero when the channel is in the 5 GHz (802.11a) band. */
static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
{
	return ch_info->band == IEEE80211_BAND_5GHZ;
}
1339
1340static inline int
1341iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
1342{
1343 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1344}
1345
1346static inline int
1347iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
1348{
1349 return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
1350}
1351
/* Free an Rx buffer given as struct page * and drop the allocation count. */
static inline void
__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
{
	__free_pages(page, priv->hw_params.rx_page_order);
	priv->alloc_rxb_page--;
}
1358
/* Free an Rx buffer given as a virtual address and drop the allocation count. */
static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
{
	free_pages(page, priv->hw_params.rx_page_order);
	priv->alloc_rxb_page--;
}
1364#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
deleted file mode 100644
index acec99197ce0..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.c
+++ /dev/null
@@ -1,42 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28
29/* sparse doesn't like tracepoint macros */
30#ifndef __CHECKER__
31#include "iwl-dev.h"
32
33#define CREATE_TRACE_POINTS
34#include "iwl-devtrace.h"
35
36EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
42#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
deleted file mode 100644
index a443725ba6be..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.h
+++ /dev/null
@@ -1,210 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_LEGACY_DEVICE_TRACE
29
30#include <linux/tracepoint.h>
31
32#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
33#undef TRACE_EVENT
34#define TRACE_EVENT(name, proto, ...) \
35static inline void trace_ ## name(proto) {}
36#endif
37
38
39#define PRIV_ENTRY __field(struct iwl_priv *, priv)
40#define PRIV_ASSIGN (__entry->priv = priv)
41
42#undef TRACE_SYSTEM
43#define TRACE_SYSTEM iwlwifi_legacy_io
44
45TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
46 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
47 TP_ARGS(priv, offs, val),
48 TP_STRUCT__entry(
49 PRIV_ENTRY
50 __field(u32, offs)
51 __field(u32, val)
52 ),
53 TP_fast_assign(
54 PRIV_ASSIGN;
55 __entry->offs = offs;
56 __entry->val = val;
57 ),
58 TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
59 __entry->offs, __entry->val)
60);
61
62TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
63 TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
64 TP_ARGS(priv, offs, val),
65 TP_STRUCT__entry(
66 PRIV_ENTRY
67 __field(u32, offs)
68 __field(u8, val)
69 ),
70 TP_fast_assign(
71 PRIV_ASSIGN;
72 __entry->offs = offs;
73 __entry->val = val;
74 ),
75 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
76 __entry->offs, __entry->val)
77);
78
79TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
80 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
81 TP_ARGS(priv, offs, val),
82 TP_STRUCT__entry(
83 PRIV_ENTRY
84 __field(u32, offs)
85 __field(u32, val)
86 ),
87 TP_fast_assign(
88 PRIV_ASSIGN;
89 __entry->offs = offs;
90 __entry->val = val;
91 ),
92 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
93 __entry->offs, __entry->val)
94);
95
96#undef TRACE_SYSTEM
97#define TRACE_SYSTEM iwlwifi_legacy_ucode
98
99#undef TRACE_SYSTEM
100#define TRACE_SYSTEM iwlwifi
101
102TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
103 TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
104 TP_ARGS(priv, hcmd, len, flags),
105 TP_STRUCT__entry(
106 PRIV_ENTRY
107 __dynamic_array(u8, hcmd, len)
108 __field(u32, flags)
109 ),
110 TP_fast_assign(
111 PRIV_ASSIGN;
112 memcpy(__get_dynamic_array(hcmd), hcmd, len);
113 __entry->flags = flags;
114 ),
115 TP_printk("[%p] hcmd %#.2x (%ssync)",
116 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
117 __entry->flags & CMD_ASYNC ? "a" : "")
118);
119
120TRACE_EVENT(iwlwifi_legacy_dev_rx,
121 TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
122 TP_ARGS(priv, rxbuf, len),
123 TP_STRUCT__entry(
124 PRIV_ENTRY
125 __dynamic_array(u8, rxbuf, len)
126 ),
127 TP_fast_assign(
128 PRIV_ASSIGN;
129 memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
130 ),
131 TP_printk("[%p] RX cmd %#.2x",
132 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
133);
134
135TRACE_EVENT(iwlwifi_legacy_dev_tx,
136 TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
137 void *buf0, size_t buf0_len,
138 void *buf1, size_t buf1_len),
139 TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
140 TP_STRUCT__entry(
141 PRIV_ENTRY
142
143 __field(size_t, framelen)
144 __dynamic_array(u8, tfd, tfdlen)
145
146 /*
147 * Do not insert between or below these items,
148 * we want to keep the frame together (except
149 * for the possible padding).
150 */
151 __dynamic_array(u8, buf0, buf0_len)
152 __dynamic_array(u8, buf1, buf1_len)
153 ),
154 TP_fast_assign(
155 PRIV_ASSIGN;
156 __entry->framelen = buf0_len + buf1_len;
157 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
158 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
159 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
160 ),
161 TP_printk("[%p] TX %.2x (%zu bytes)",
162 __entry->priv,
163 ((u8 *)__get_dynamic_array(buf0))[0],
164 __entry->framelen)
165);
166
167TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
168 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
169 u32 data1, u32 data2, u32 line, u32 blink1,
170 u32 blink2, u32 ilink1, u32 ilink2),
171 TP_ARGS(priv, desc, time, data1, data2, line,
172 blink1, blink2, ilink1, ilink2),
173 TP_STRUCT__entry(
174 PRIV_ENTRY
175 __field(u32, desc)
176 __field(u32, time)
177 __field(u32, data1)
178 __field(u32, data2)
179 __field(u32, line)
180 __field(u32, blink1)
181 __field(u32, blink2)
182 __field(u32, ilink1)
183 __field(u32, ilink2)
184 ),
185 TP_fast_assign(
186 PRIV_ASSIGN;
187 __entry->desc = desc;
188 __entry->time = time;
189 __entry->data1 = data1;
190 __entry->data2 = data2;
191 __entry->line = line;
192 __entry->blink1 = blink1;
193 __entry->blink2 = blink2;
194 __entry->ilink1 = ilink1;
195 __entry->ilink2 = ilink2;
196 ),
197 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
198 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
199 __entry->priv, __entry->desc, __entry->time, __entry->data1,
200 __entry->data2, __entry->line, __entry->blink1,
201 __entry->blink2, __entry->ilink1, __entry->ilink2)
202);
203
204#endif /* __IWLWIFI_DEVICE_TRACE */
205
206#undef TRACE_INCLUDE_PATH
207#define TRACE_INCLUDE_PATH .
208#undef TRACE_INCLUDE_FILE
209#define TRACE_INCLUDE_FILE iwl-devtrace
210#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
deleted file mode 100644
index 5bf3f49b74ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ /dev/null
@@ -1,553 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-eeprom.h"
76#include "iwl-io.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwlegacy_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
/*
 * Channel-number tables, one per EEPROM regulatory band (see the
 * "EEPROM BANDS" comment above). Entry order matches the per-channel
 * regulatory records stored in the EEPROM for that band.
 */

/* 2.4 GHz */
const u8 iwlegacy_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
static const u8 iwlegacy_eeprom_band_2[] = {	/* 4915-5080MHz */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 iwlegacy_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 iwlegacy_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 iwlegacy_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

/* HT40 bands: each entry is the lower 20 MHz control channel of an
 * HT40 pair; the upper half is 4 channel numbers above it. */
static const u8 iwlegacy_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 iwlegacy_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
138
139/******************************************************************************
140 *
141 * EEPROM related functions
142 *
143******************************************************************************/
144
145static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
146{
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
148 int ret = 0;
149
150 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
151 switch (gp) {
152 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
153 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
154 break;
155 default:
156 IWL_ERR(priv, "bad EEPROM signature,"
157 "EEPROM_GP=0x%08x\n", gp);
158 ret = -ENOENT;
159 break;
160 }
161 return ret;
162}
163
164const u8
165*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
166{
167 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
168 return &priv->eeprom[offset];
169}
170EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
171
172u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
173{
174 if (!priv->eeprom)
175 return 0;
176 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
177}
178EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
179
180/**
181 * iwl_legacy_eeprom_init - read EEPROM contents
182 *
183 * Load the EEPROM contents from adapter into priv->eeprom
184 *
185 * NOTE: This routine uses the non-debug IO access functions.
186 */
187int iwl_legacy_eeprom_init(struct iwl_priv *priv)
188{
189 __le16 *e;
190 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
191 int sz;
192 int ret;
193 u16 addr;
194
195 /* allocate eeprom */
196 sz = priv->cfg->base_params->eeprom_size;
197 IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
198 priv->eeprom = kzalloc(sz, GFP_KERNEL);
199 if (!priv->eeprom) {
200 ret = -ENOMEM;
201 goto alloc_err;
202 }
203 e = (__le16 *)priv->eeprom;
204
205 priv->cfg->ops->lib->apm_ops.init(priv);
206
207 ret = iwl_legacy_eeprom_verify_signature(priv);
208 if (ret < 0) {
209 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
210 ret = -ENOENT;
211 goto err;
212 }
213
214 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
215 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
216 if (ret < 0) {
217 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
218 ret = -ENOENT;
219 goto err;
220 }
221
222 /* eeprom is an array of 16bit values */
223 for (addr = 0; addr < sz; addr += sizeof(u16)) {
224 u32 r;
225
226 _iwl_legacy_write32(priv, CSR_EEPROM_REG,
227 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
228
229 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
230 CSR_EEPROM_REG_READ_VALID_MSK,
231 CSR_EEPROM_REG_READ_VALID_MSK,
232 IWL_EEPROM_ACCESS_TIMEOUT);
233 if (ret < 0) {
234 IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
235 addr);
236 goto done;
237 }
238 r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
239 e[addr / 2] = cpu_to_le16(r >> 16);
240 }
241
242 IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
243 "EEPROM",
244 iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
245
246 ret = 0;
247done:
248 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
249
250err:
251 if (ret)
252 iwl_legacy_eeprom_free(priv);
253 /* Reset chip to save power until we load uCode during "up". */
254 iwl_legacy_apm_stop(priv);
255alloc_err:
256 return ret;
257}
258EXPORT_SYMBOL(iwl_legacy_eeprom_init);
259
260void iwl_legacy_eeprom_free(struct iwl_priv *priv)
261{
262 kfree(priv->eeprom);
263 priv->eeprom = NULL;
264}
265EXPORT_SYMBOL(iwl_legacy_eeprom_free);
266
267static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
268 int eep_band, int *eeprom_ch_count,
269 const struct iwl_eeprom_channel **eeprom_ch_info,
270 const u8 **eeprom_ch_index)
271{
272 u32 offset = priv->cfg->ops->lib->
273 eeprom_ops.regulatory_bands[eep_band - 1];
274 switch (eep_band) {
275 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
277 *eeprom_ch_info = (struct iwl_eeprom_channel *)
278 iwl_legacy_eeprom_query_addr(priv, offset);
279 *eeprom_ch_index = iwlegacy_eeprom_band_1;
280 break;
281 case 2: /* 4.9GHz band */
282 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
283 *eeprom_ch_info = (struct iwl_eeprom_channel *)
284 iwl_legacy_eeprom_query_addr(priv, offset);
285 *eeprom_ch_index = iwlegacy_eeprom_band_2;
286 break;
287 case 3: /* 5.2GHz band */
288 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
289 *eeprom_ch_info = (struct iwl_eeprom_channel *)
290 iwl_legacy_eeprom_query_addr(priv, offset);
291 *eeprom_ch_index = iwlegacy_eeprom_band_3;
292 break;
293 case 4: /* 5.5GHz band */
294 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
295 *eeprom_ch_info = (struct iwl_eeprom_channel *)
296 iwl_legacy_eeprom_query_addr(priv, offset);
297 *eeprom_ch_index = iwlegacy_eeprom_band_4;
298 break;
299 case 5: /* 5.7GHz band */
300 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
301 *eeprom_ch_info = (struct iwl_eeprom_channel *)
302 iwl_legacy_eeprom_query_addr(priv, offset);
303 *eeprom_ch_index = iwlegacy_eeprom_band_5;
304 break;
305 case 6: /* 2.4GHz ht40 channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
307 *eeprom_ch_info = (struct iwl_eeprom_channel *)
308 iwl_legacy_eeprom_query_addr(priv, offset);
309 *eeprom_ch_index = iwlegacy_eeprom_band_6;
310 break;
311 case 7: /* 5 GHz ht40 channels */
312 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
313 *eeprom_ch_info = (struct iwl_eeprom_channel *)
314 iwl_legacy_eeprom_query_addr(priv, offset);
315 *eeprom_ch_index = iwlegacy_eeprom_band_7;
316 break;
317 default:
318 BUG();
319 }
320}
321
322#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
323 ? # x " " : "")
324/**
325 * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
326 *
327 * Does not set up a command, or touch hardware.
328 */
329static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
330 enum ieee80211_band band, u16 channel,
331 const struct iwl_eeprom_channel *eeprom_ch,
332 u8 clear_ht40_extension_channel)
333{
334 struct iwl_channel_info *ch_info;
335
336 ch_info = (struct iwl_channel_info *)
337 iwl_legacy_get_channel_info(priv, band, channel);
338
339 if (!iwl_legacy_is_channel_valid(ch_info))
340 return -1;
341
342 IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
343 " Ad-Hoc %ssupported\n",
344 ch_info->channel,
345 iwl_legacy_is_channel_a_band(ch_info) ?
346 "5.2" : "2.4",
347 CHECK_AND_PRINT(IBSS),
348 CHECK_AND_PRINT(ACTIVE),
349 CHECK_AND_PRINT(RADAR),
350 CHECK_AND_PRINT(WIDE),
351 CHECK_AND_PRINT(DFS),
352 eeprom_ch->flags,
353 eeprom_ch->max_power_avg,
354 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
355 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
356 "" : "not ");
357
358 ch_info->ht40_eeprom = *eeprom_ch;
359 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
360 ch_info->ht40_flags = eeprom_ch->flags;
361 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
362 ch_info->ht40_extension_channel &=
363 ~clear_ht40_extension_channel;
364
365 return 0;
366}
367
368#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
369 ? # x " " : "")
370
371/**
372 * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
373 */
374int iwl_legacy_init_channel_map(struct iwl_priv *priv)
375{
376 int eeprom_ch_count = 0;
377 const u8 *eeprom_ch_index = NULL;
378 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
379 int band, ch;
380 struct iwl_channel_info *ch_info;
381
382 if (priv->channel_count) {
383 IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
384 return 0;
385 }
386
387 IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
388
389 priv->channel_count =
390 ARRAY_SIZE(iwlegacy_eeprom_band_1) +
391 ARRAY_SIZE(iwlegacy_eeprom_band_2) +
392 ARRAY_SIZE(iwlegacy_eeprom_band_3) +
393 ARRAY_SIZE(iwlegacy_eeprom_band_4) +
394 ARRAY_SIZE(iwlegacy_eeprom_band_5);
395
396 IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
397 priv->channel_count);
398
399 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
400 priv->channel_count, GFP_KERNEL);
401 if (!priv->channel_info) {
402 IWL_ERR(priv, "Could not allocate channel_info\n");
403 priv->channel_count = 0;
404 return -ENOMEM;
405 }
406
407 ch_info = priv->channel_info;
408
409 /* Loop through the 5 EEPROM bands adding them in order to the
410 * channel map we maintain (that contains additional information than
411 * what just in the EEPROM) */
412 for (band = 1; band <= 5; band++) {
413
414 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
415 &eeprom_ch_info, &eeprom_ch_index);
416
417 /* Loop through each band adding each of the channels */
418 for (ch = 0; ch < eeprom_ch_count; ch++) {
419 ch_info->channel = eeprom_ch_index[ch];
420 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
421 IEEE80211_BAND_5GHZ;
422
423 /* permanently store EEPROM's channel regulatory flags
424 * and max power in channel info database. */
425 ch_info->eeprom = eeprom_ch_info[ch];
426
427 /* Copy the run-time flags so they are there even on
428 * invalid channels */
429 ch_info->flags = eeprom_ch_info[ch].flags;
430 /* First write that ht40 is not enabled, and then enable
431 * one by one */
432 ch_info->ht40_extension_channel =
433 IEEE80211_CHAN_NO_HT40;
434
435 if (!(iwl_legacy_is_channel_valid(ch_info))) {
436 IWL_DEBUG_EEPROM(priv,
437 "Ch. %d Flags %x [%sGHz] - "
438 "No traffic\n",
439 ch_info->channel,
440 ch_info->flags,
441 iwl_legacy_is_channel_a_band(ch_info) ?
442 "5.2" : "2.4");
443 ch_info++;
444 continue;
445 }
446
447 /* Initialize regulatory-based run-time data */
448 ch_info->max_power_avg = ch_info->curr_txpow =
449 eeprom_ch_info[ch].max_power_avg;
450 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
451 ch_info->min_power = 0;
452
453 IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
454 "%s%s%s%s%s%s(0x%02x %ddBm):"
455 " Ad-Hoc %ssupported\n",
456 ch_info->channel,
457 iwl_legacy_is_channel_a_band(ch_info) ?
458 "5.2" : "2.4",
459 CHECK_AND_PRINT_I(VALID),
460 CHECK_AND_PRINT_I(IBSS),
461 CHECK_AND_PRINT_I(ACTIVE),
462 CHECK_AND_PRINT_I(RADAR),
463 CHECK_AND_PRINT_I(WIDE),
464 CHECK_AND_PRINT_I(DFS),
465 eeprom_ch_info[ch].flags,
466 eeprom_ch_info[ch].max_power_avg,
467 ((eeprom_ch_info[ch].
468 flags & EEPROM_CHANNEL_IBSS)
469 && !(eeprom_ch_info[ch].
470 flags & EEPROM_CHANNEL_RADAR))
471 ? "" : "not ");
472
473 ch_info++;
474 }
475 }
476
477 /* Check if we do have HT40 channels */
478 if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
479 EEPROM_REGULATORY_BAND_NO_HT40 &&
480 priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
481 EEPROM_REGULATORY_BAND_NO_HT40)
482 return 0;
483
484 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
485 for (band = 6; band <= 7; band++) {
486 enum ieee80211_band ieeeband;
487
488 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
489 &eeprom_ch_info, &eeprom_ch_index);
490
491 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
492 ieeeband =
493 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
494
495 /* Loop through each band adding each of the channels */
496 for (ch = 0; ch < eeprom_ch_count; ch++) {
497 /* Set up driver's info for lower half */
498 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
499 eeprom_ch_index[ch],
500 &eeprom_ch_info[ch],
501 IEEE80211_CHAN_NO_HT40PLUS);
502
503 /* Set up driver's info for upper half */
504 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
505 eeprom_ch_index[ch] + 4,
506 &eeprom_ch_info[ch],
507 IEEE80211_CHAN_NO_HT40MINUS);
508 }
509 }
510
511 return 0;
512}
513EXPORT_SYMBOL(iwl_legacy_init_channel_map);
514
515/*
516 * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
517 */
518void iwl_legacy_free_channel_map(struct iwl_priv *priv)
519{
520 kfree(priv->channel_info);
521 priv->channel_count = 0;
522}
523EXPORT_SYMBOL(iwl_legacy_free_channel_map);
524
525/**
526 * iwl_legacy_get_channel_info - Find driver's private channel info
527 *
528 * Based on band and channel number.
529 */
530const struct
531iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
532 enum ieee80211_band band, u16 channel)
533{
534 int i;
535
536 switch (band) {
537 case IEEE80211_BAND_5GHZ:
538 for (i = 14; i < priv->channel_count; i++) {
539 if (priv->channel_info[i].channel == channel)
540 return &priv->channel_info[i];
541 }
542 break;
543 case IEEE80211_BAND_2GHZ:
544 if (channel >= 1 && channel <= 14)
545 return &priv->channel_info[channel - 1];
546 break;
547 default:
548 BUG();
549 }
550
551 return NULL;
552}
553EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
deleted file mode 100644
index c59c81002022..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.h
+++ /dev/null
@@ -1,344 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_eeprom_h__
64#define __iwl_legacy_eeprom_h__
65
66#include <net/mac80211.h>
67
68struct iwl_priv;
69
70/*
71 * EEPROM access time values:
72 *
73 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79
80#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
81#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
82
83
84/*
85 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
86 *
87 * IBSS and/or AP operation is allowed *only* on those channels with
88 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
89 * RADAR detection is not supported by the 4965 driver, but is a
90 * requirement for establishing a new network for legal operation on channels
91 * requiring RADAR detection or restricting ACTIVE scanning.
92 *
93 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
94 * It only indicates that 20 MHz channel use is supported; HT40 channel
95 * usage is indicated by a separate set of regulatory flags for each
96 * HT40 channel pair.
97 *
98 * NOTE: Using a channel inappropriately will result in a uCode error!
99 */
100#define IWL_NUM_TX_CALIB_GROUPS 5
101enum {
102 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
103 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
104 /* Bit 2 Reserved */
105 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
106 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
107 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
108 /* Bit 6 Reserved (was Narrow Channel) */
109 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
110};
111
112/* SKU Capabilities */
113/* 3945 only */
114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
116
117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
119struct iwl_eeprom_channel {
120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
122} __packed;
123
124/* 3945 Specific */
125#define EEPROM_3945_EEPROM_VERSION (0x2f)
126
127/* 4965 has two radio transmitters (and 3 radio receivers) */
128#define EEPROM_TX_POWER_TX_CHAINS (2)
129
130/* 4965 has room for up to 8 sets of txpower calibration data */
131#define EEPROM_TX_POWER_BANDS (8)
132
133/* 4965 factory calibration measures txpower gain settings for
134 * each of 3 target output levels */
135#define EEPROM_TX_POWER_MEASUREMENTS (3)
136
137/* 4965 Specific */
138/* 4965 driver does not work with txpower calibration version < 5 */
139#define EEPROM_4965_TX_POWER_VERSION (5)
140#define EEPROM_4965_EEPROM_VERSION (0x2f)
141#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
142#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
143#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
144#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
145
146/* 2.4 GHz */
147extern const u8 iwlegacy_eeprom_band_1[14];
148
149/*
150 * factory calibration data for one txpower level, on one channel,
151 * measured on one of the 2 tx chains (radio transmitter and associated
152 * antenna). EEPROM contains:
153 *
154 * 1) Temperature (degrees Celsius) of device when measurement was made.
155 *
156 * 2) Gain table index used to achieve the target measurement power.
157 * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
158 *
159 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
160 *
161 * 4) RF power amplifier detector level measurement (not used).
162 */
163struct iwl_eeprom_calib_measure {
164 u8 temperature; /* Device temperature (Celsius) */
165 u8 gain_idx; /* Index into gain table */
166 u8 actual_pow; /* Measured RF output power, half-dBm */
167 s8 pa_det; /* Power amp detector level (not used) */
168} __packed;
169
170
171/*
172 * measurement set for one channel. EEPROM contains:
173 *
174 * 1) Channel number measured
175 *
176 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
177 * (a.k.a. "tx chains") (6 measurements altogether)
178 */
179struct iwl_eeprom_calib_ch_info {
180 u8 ch_num;
181 struct iwl_eeprom_calib_measure
182 measurements[EEPROM_TX_POWER_TX_CHAINS]
183 [EEPROM_TX_POWER_MEASUREMENTS];
184} __packed;
185
186/*
187 * txpower subband info.
188 *
189 * For each frequency subband, EEPROM contains the following:
190 *
191 * 1) First and last channels within range of the subband. "0" values
192 * indicate that this sample set is not being used.
193 *
194 * 2) Sample measurement sets for 2 channels close to the range endpoints.
195 */
196struct iwl_eeprom_calib_subband_info {
197 u8 ch_from; /* channel number of lowest channel in subband */
198 u8 ch_to; /* channel number of highest channel in subband */
199 struct iwl_eeprom_calib_ch_info ch1;
200 struct iwl_eeprom_calib_ch_info ch2;
201} __packed;
202
203
204/*
205 * txpower calibration info. EEPROM contains:
206 *
207 * 1) Factory-measured saturation power levels (maximum levels at which
208 * tx power amplifier can output a signal without too much distortion).
209 * There is one level for 2.4 GHz band and one for 5 GHz band. These
210 * values apply to all channels within each of the bands.
211 *
212 * 2) Factory-measured power supply voltage level. This is assumed to be
213 * constant (i.e. same value applies to all channels/bands) while the
214 * factory measurements are being made.
215 *
216 * 3) Up to 8 sets of factory-measured txpower calibration values.
217 * These are for different frequency ranges, since txpower gain
218 * characteristics of the analog radio circuitry vary with frequency.
219 *
220 * Not all sets need to be filled with data;
221 * struct iwl_eeprom_calib_subband_info contains range of channels
222 * (0 if unused) for each set of data.
223 */
224struct iwl_eeprom_calib_info {
225 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
226 u8 saturation_power52; /* half-dBm */
227 __le16 voltage; /* signed */
228 struct iwl_eeprom_calib_subband_info
229 band_info[EEPROM_TX_POWER_BANDS];
230} __packed;
231
232
233/* General */
234#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
235#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
236#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
237#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
238#define EEPROM_VERSION (2*0x44) /* 2 bytes */
239#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
240#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
241#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
242#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
243#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
244
245/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
246#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
247#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
248#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
249#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
250#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
251#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
252
253#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
254#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
255
256/*
257 * Per-channel regulatory data.
258 *
259 * Each channel that *might* be supported by iwl has a fixed location
260 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
261 * txpower (MSB).
262 *
263 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
264 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
265 *
266 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
267 */
268#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
269#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
270#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
271
272/*
273 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
274 * 5.0 GHz channels 7, 8, 11, 12, 16
275 * (4915-5080MHz) (none of these is ever supported)
276 */
277#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
278#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
279
280/*
281 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
282 * (5170-5320MHz)
283 */
284#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
285#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
286
287/*
288 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
289 * (5500-5700MHz)
290 */
291#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
292#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
293
294/*
295 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
296 * (5725-5825MHz)
297 */
298#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
299#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
300
301/*
302 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
303 *
304 * The channel listed is the center of the lower 20 MHz half of the channel.
305 * The overall center frequency is actually 2 channels (10 MHz) above that,
306 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
307 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
308 * and the overall HT40 channel width centers on channel 3.
309 *
310 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
311 * control channel to which to tune. RXON also specifies whether the
312 * control channel is the upper or lower half of a HT40 channel.
313 *
314 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
315 */
316#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
317
318/*
319 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
320 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
321 */
322#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
323
324#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
325
/* Per-hardware EEPROM access hooks plus regulatory-band layout. */
struct iwl_eeprom_ops {
	/* EEPROM byte offsets of the 7 regulatory band tables (EEPROM
	 * bands 1-7); EEPROM_REGULATORY_BAND_NO_HT40 (0) in slots 5/6
	 * marks hardware without HT40 channel data. */
	const u32 regulatory_bands[7];
	/* Grant the driver (instead of uCode) access to read the EEPROM */
	int (*acquire_semaphore) (struct iwl_priv *priv);
	/* Release EEPROM access after reading */
	void (*release_semaphore) (struct iwl_priv *priv);
};
331
332
333int iwl_legacy_eeprom_init(struct iwl_priv *priv);
334void iwl_legacy_eeprom_free(struct iwl_priv *priv);
335const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
336 size_t offset);
337u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
338int iwl_legacy_init_channel_map(struct iwl_priv *priv);
339void iwl_legacy_free_channel_map(struct iwl_priv *priv);
340const struct iwl_channel_info *iwl_legacy_get_channel_info(
341 const struct iwl_priv *priv,
342 enum ieee80211_band band, u16 channel);
343
344#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
deleted file mode 100644
index 6e6091816e36..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-fh.h
+++ /dev/null
@@ -1,513 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_fh_h__
64#define __iwl_legacy_fh_h__
65
66/****************************/
67/* Flow Handler Definitions */
68/****************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH_MEM_LOWER_BOUND (0x1000)
75#define FH_MEM_UPPER_BOUND (0x2000)
76
77/**
78 * Keep-Warm (KW) buffer base address.
79 *
80 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
81 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
82 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
83 * from going into a power-savings mode that would cause higher DRAM latency,
84 * and possible data over/under-runs, before all Tx/Rx is complete.
85 *
86 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
87 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
88 * automatically invokes keep-warm accesses when normal accesses might not
89 * be sufficient to maintain fast DRAM response.
90 *
91 * Bit fields:
92 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
93 */
94#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
95
96
97/**
98 * TFD Circular Buffers Base (CBBC) addresses
99 *
100 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
101 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
102 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
103 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
104 * aligned (address bits 0-7 must be 0).
105 *
106 * Bit fields in each pointer register:
107 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
108 */
109#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
110#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
111
112/* Find TFD CB base pointer for given queue (range 0-15). */
113#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
114
115
116/**
117 * Rx SRAM Control and Status Registers (RSCSR)
118 *
119 * These registers provide handshake between driver and 4965 for the Rx queue
120 * (this queue handles *all* command responses, notifications, Rx data, etc.
121 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
122 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
123 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
124 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
125 * mapping between RBDs and RBs.
126 *
127 * Driver must allocate host DRAM memory for the following, and set the
128 * physical address of each into 4965 registers:
129 *
130 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
131 * entries (although any power of 2, up to 4096, is selectable by driver).
132 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
133 * (typically 4K, although 8K or 16K are also selectable by driver).
134 * Driver sets up RB size and number of RBDs in the CB via Rx config
135 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
136 *
137 * Bit fields within one RBD:
138 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
139 *
140 * Driver sets physical address [35:8] of base of RBD circular buffer
141 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
142 *
143 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
144 * (RBs) have been filled, via a "write pointer", actually the index of
145 * the RB's corresponding RBD within the circular buffer. Driver sets
146 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
147 *
148 * Bit fields in lower dword of Rx status buffer (upper dword not used
149 * by driver; see struct iwl4965_shared, val0):
150 * 31-12: Not used by driver
151 * 11- 0: Index of last filled Rx buffer descriptor
152 * (4965 writes, driver reads this value)
153 *
154 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
155 * enter pointers to these RBs into contiguous RBD circular buffer entries,
156 * and update the 4965's "write" index register,
157 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
158 *
159 * This "write" index corresponds to the *next* RBD that the driver will make
160 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
161 * the circular buffer. This value should initially be 0 (before preparing any
162 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
163 * wrap back to 0 at the end of the circular buffer (but don't wrap before
164 * "read" index has advanced past 1! See below).
165 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
166 *
167 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
168 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
169 * to tell the driver the index of the latest filled RBD. The driver must
170 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
171 *
172 * The driver must also internally keep track of a third index, which is the
173 * next RBD to process. When receiving an Rx interrupt, driver should process
174 * all filled but unprocessed RBs up to, but not including, the RB
175 * corresponding to the "read" index. For example, if "read" index becomes "1",
176 * driver may process the RB pointed to by RBD 0. Depending on volume of
177 * traffic, there may be many RBs to process.
178 *
179 * If read index == write index, 4965 thinks there is no room to put new data.
180 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
181 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
182 * and "read" indexes; that is, make sure that there are no more than 254
183 * buffers waiting to be filled.
184 */
185#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
186#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
187#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
188
189/**
190 * Physical base address of 8-byte Rx Status buffer.
191 * Bit fields:
192 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
193 */
194#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
195
196/**
197 * Physical base address of Rx Buffer Descriptor Circular Buffer.
198 * Bit fields:
199 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
200 */
201#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
202
203/**
204 * Rx write pointer (index, really!).
205 * Bit fields:
206 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
207 * NOTE: For 256-entry circular buffer, use only bits [7:0].
208 */
209#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
210#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
211
212
213/**
214 * Rx Config/Status Registers (RCSR)
215 * Rx Config Reg for channel 0 (only channel used)
216 *
217 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
218 * normal operation (see bit fields).
219 *
220 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
221 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
222 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
223 *
224 * Bit fields:
225 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
226 * '10' operate normally
227 * 29-24: reserved
228 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
229 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
230 * 19-18: reserved
231 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
232 * '10' 12K, '11' 16K.
233 * 15-14: reserved
234 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
235 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
236 * typical value 0x10 (about 1/2 msec)
237 * 3- 0: reserved
238 */
239#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
240#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
241#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
242
243#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
244
245#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
246#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
247#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
248#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
249#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
250#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
251
252#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
253#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
254#define RX_RB_TIMEOUT (0x10)
255
256#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
257#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
258#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
259
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
262#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
263#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
264
265#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
267#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
268
269#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
270
271/**
272 * Rx Shared Status Registers (RSSR)
273 *
274 * After stopping Rx DMA channel (writing 0 to
275 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
276 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
277 *
278 * Bit fields:
279 * 24: 1 = Channel 0 is idle
280 *
281 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
282 * contain default values that should not be altered by the driver.
283 */
284#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
285#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
286
287#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
288#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
289#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
290 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
291
292#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
293
294#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
295
296/* TFDB Area - TFDs buffer table */
297#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
298#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
299#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
300#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
301#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
302
303/**
304 * Transmit DMA Channel Control/Status Registers (TCSR)
305 *
306 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
307 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
308 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
309 *
310 * To use a Tx DMA channel, driver must initialize its
311 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
312 *
313 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
314 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
315 *
316 * All other bits should be 0.
317 *
318 * Bit fields:
319 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
320 * '10' operate normally
321 * 29- 4: Reserved, set to "0"
322 * 3: Enable internal DMA requests (1, normal operation), disable (0)
323 * 2- 0: Reserved, set to "0"
324 */
325#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
326#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
327
328/* Find Control/Status reg for given Tx DMA/FIFO channel */
329#define FH49_TCSR_CHNL_NUM (7)
330#define FH50_TCSR_CHNL_NUM (8)
331
332/* TCSR: tx_config register values */
333#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
334 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
335#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
336 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
337#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
338 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
339
340#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
341#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
342
343#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
344#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
345
346#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
347#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
348#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
349
350#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
351#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
352#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
353
354#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
355#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
356#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
357
358#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
359#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
360#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
361
362#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
363#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
364
365/**
366 * Tx Shared Status Registers (TSSR)
367 *
368 * After stopping Tx DMA channel (writing 0 to
369 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
370 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
371 * (channel's buffers empty | no pending requests).
372 *
373 * Bit fields:
374 * 31-24: 1 = Channel buffers empty (channel 7:0)
375 * 23-16: 1 = No pending requests (channel 7:0)
376 */
377#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
378#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
379
380#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
381
382/**
383 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
384 * 31: Indicates an address error when accessed to internal memory
385 * uCode/driver must write "1" in order to clear this flag
386 * 30: Indicates that Host did not send the expected number of dwords to FH
387 * uCode/driver must write "1" in order to clear this flag
388 * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
389 * command was received from the scheduler while the TRB was already full
390 * with previous command
391 * uCode/driver must write "1" in order to clear this flag
392 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
393 * bit is set, it indicates that the FH has received a full indication
394 * from the RTC TxFIFO and the current value of the TxCredit counter was
395 * not equal to zero. This mean that the credit mechanism was not
396 * synchronized to the TxFIFO status
397 * uCode/driver must write "1" in order to clear this flag
398 */
399#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
400
401#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
402
403/* Tx service channels */
404#define FH_SRVC_CHNL (9)
405#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
406#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
407#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
408 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
409
410#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
411/* Instruct FH to increment the retry count of a packet when
412 * it is brought from the memory to TX-FIFO
413 */
414#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
415
416#define RX_QUEUE_SIZE 256
417#define RX_QUEUE_MASK 255
418#define RX_QUEUE_SIZE_LOG 8
419
420/*
421 * RX related structures and functions
422 */
423#define RX_FREE_BUFFERS 64
424#define RX_LOW_WATERMARK 8
425
426/* Size of one Rx buffer in host DRAM */
427#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
428#define IWL_RX_BUF_SIZE_4K (4 * 1024)
429#define IWL_RX_BUF_SIZE_8K (8 * 1024)
430
431/**
432 * struct iwl_rb_status - reseve buffer status
433 * host memory mapped FH registers
434 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
435 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
436 * @finished_rb_num [0:11] - Indicates the index of the current RB
437 * in which the last frame was written to
438 * @finished_fr_num [0:11] - Indicates the index of the RX Frame
439 * which was transferred
440 */
441struct iwl_rb_status {
442 __le16 closed_rb_num;
443 __le16 closed_fr_num;
444 __le16 finished_rb_num;
445 __le16 finished_fr_nam;
446 __le32 __unused; /* 3945 only */
447} __packed;
448
449
450#define TFD_QUEUE_SIZE_MAX (256)
451#define TFD_QUEUE_SIZE_BC_DUP (64)
452#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
453#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
454#define IWL_NUM_OF_TBS 20
455
456static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
457{
458 return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
459}
460/**
461 * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
462 *
463 * This structure contains dma address and length of transmission address
464 *
465 * @lo: low [31:0] portion of the dma address of TX buffer
466 * every even is unaligned on 16 bit boundary
467 * @hi_n_len 0-3 [35:32] portion of dma
468 * 4-15 length of the tx buffer
469 */
470struct iwl_tfd_tb {
471 __le32 lo;
472 __le16 hi_n_len;
473} __packed;
474
475/**
476 * struct iwl_tfd
477 *
478 * Transmit Frame Descriptor (TFD)
479 *
480 * @ __reserved1[3] reserved
481 * @ num_tbs 0-4 number of active tbs
482 * 5 reserved
483 * 6-7 padding (not used)
484 * @ tbs[20] transmit frame buffer descriptors
485 * @ __pad padding
486 *
487 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
488 * Both driver and device share these circular buffers, each of which must be
489 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
490 *
491 * Driver must indicate the physical address of the base of each
492 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
493 *
494 * Each TFD contains pointer/size information for up to 20 data buffers
495 * in host DRAM. These buffers collectively contain the (one) frame described
496 * by the TFD. Each buffer must be a single contiguous block of memory within
497 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
498 * of (4K - 4). The concatenates all of a TFD's buffers into a single
499 * Tx frame, up to 8 KBytes in size.
500 *
501 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
502 */
503struct iwl_tfd {
504 u8 __reserved1[3];
505 u8 num_tbs;
506 struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
507 __le32 __pad;
508} __packed;
509
510/* Keep Warm Size */
511#define IWL_KW_SIZE 0x1000 /* 4k */
512
513#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
deleted file mode 100644
index ce1fc9feb61f..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ /dev/null
@@ -1,271 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/sched.h>
32#include <net/mac80211.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-eeprom.h"
37#include "iwl-core.h"
38
39
40const char *iwl_legacy_get_cmd_string(u8 cmd)
41{
42 switch (cmd) {
43 IWL_CMD(REPLY_ALIVE);
44 IWL_CMD(REPLY_ERROR);
45 IWL_CMD(REPLY_RXON);
46 IWL_CMD(REPLY_RXON_ASSOC);
47 IWL_CMD(REPLY_QOS_PARAM);
48 IWL_CMD(REPLY_RXON_TIMING);
49 IWL_CMD(REPLY_ADD_STA);
50 IWL_CMD(REPLY_REMOVE_STA);
51 IWL_CMD(REPLY_WEPKEY);
52 IWL_CMD(REPLY_3945_RX);
53 IWL_CMD(REPLY_TX);
54 IWL_CMD(REPLY_RATE_SCALE);
55 IWL_CMD(REPLY_LEDS_CMD);
56 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
57 IWL_CMD(REPLY_CHANNEL_SWITCH);
58 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
59 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
60 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
61 IWL_CMD(POWER_TABLE_CMD);
62 IWL_CMD(PM_SLEEP_NOTIFICATION);
63 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
64 IWL_CMD(REPLY_SCAN_CMD);
65 IWL_CMD(REPLY_SCAN_ABORT_CMD);
66 IWL_CMD(SCAN_START_NOTIFICATION);
67 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
68 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
69 IWL_CMD(BEACON_NOTIFICATION);
70 IWL_CMD(REPLY_TX_BEACON);
71 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
72 IWL_CMD(REPLY_BT_CONFIG);
73 IWL_CMD(REPLY_STATISTICS_CMD);
74 IWL_CMD(STATISTICS_NOTIFICATION);
75 IWL_CMD(CARD_STATE_NOTIFICATION);
76 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
77 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
78 IWL_CMD(SENSITIVITY_CMD);
79 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
80 IWL_CMD(REPLY_RX_PHY_CMD);
81 IWL_CMD(REPLY_RX_MPDU_CMD);
82 IWL_CMD(REPLY_RX);
83 IWL_CMD(REPLY_COMPRESSED_BA);
84 default:
85 return "UNKNOWN";
86
87 }
88}
89EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
90
91#define HOST_COMPLETE_TIMEOUT (HZ / 2)
92
93static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
94 struct iwl_device_cmd *cmd,
95 struct iwl_rx_packet *pkt)
96{
97 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
98 IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
99 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
100 return;
101 }
102
103#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
104 switch (cmd->hdr.cmd) {
105 case REPLY_TX_LINK_QUALITY_CMD:
106 case SENSITIVITY_CMD:
107 IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
108 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
109 break;
110 default:
111 IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
112 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
113 }
114#endif
115}
116
117static int
118iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
119{
120 int ret;
121
122 BUG_ON(!(cmd->flags & CMD_ASYNC));
123
124 /* An asynchronous command can not expect an SKB to be set. */
125 BUG_ON(cmd->flags & CMD_WANT_SKB);
126
127 /* Assign a generic callback if one is not provided */
128 if (!cmd->callback)
129 cmd->callback = iwl_legacy_generic_cmd_callback;
130
131 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
132 return -EBUSY;
133
134 ret = iwl_legacy_enqueue_hcmd(priv, cmd);
135 if (ret < 0) {
136 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
137 iwl_legacy_get_cmd_string(cmd->id), ret);
138 return ret;
139 }
140 return 0;
141}
142
143int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
144{
145 int cmd_idx;
146 int ret;
147
148 lockdep_assert_held(&priv->mutex);
149
150 BUG_ON(cmd->flags & CMD_ASYNC);
151
152 /* A synchronous command can not have a callback set. */
153 BUG_ON(cmd->callback);
154
155 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
156 iwl_legacy_get_cmd_string(cmd->id));
157
158 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
159 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
160 iwl_legacy_get_cmd_string(cmd->id));
161
162 cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
163 if (cmd_idx < 0) {
164 ret = cmd_idx;
165 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
166 iwl_legacy_get_cmd_string(cmd->id), ret);
167 goto out;
168 }
169
170 ret = wait_event_timeout(priv->wait_command_queue,
171 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
172 HOST_COMPLETE_TIMEOUT);
173 if (!ret) {
174 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
175 IWL_ERR(priv,
176 "Error sending %s: time out after %dms.\n",
177 iwl_legacy_get_cmd_string(cmd->id),
178 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
179
180 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
181 IWL_DEBUG_INFO(priv,
182 "Clearing HCMD_ACTIVE for command %s\n",
183 iwl_legacy_get_cmd_string(cmd->id));
184 ret = -ETIMEDOUT;
185 goto cancel;
186 }
187 }
188
189 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
190 IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
191 iwl_legacy_get_cmd_string(cmd->id));
192 ret = -ECANCELED;
193 goto fail;
194 }
195 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
196 IWL_ERR(priv, "Command %s failed: FW Error\n",
197 iwl_legacy_get_cmd_string(cmd->id));
198 ret = -EIO;
199 goto fail;
200 }
201 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
202 IWL_ERR(priv, "Error: Response NULL in '%s'\n",
203 iwl_legacy_get_cmd_string(cmd->id));
204 ret = -EIO;
205 goto cancel;
206 }
207
208 ret = 0;
209 goto out;
210
211cancel:
212 if (cmd->flags & CMD_WANT_SKB) {
213 /*
214 * Cancel the CMD_WANT_SKB flag for the cmd in the
215 * TX cmd queue. Otherwise in case the cmd comes
216 * in later, it will possibly set an invalid
217 * address (cmd->meta.source).
218 */
219 priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
220 ~CMD_WANT_SKB;
221 }
222fail:
223 if (cmd->reply_page) {
224 iwl_legacy_free_pages(priv, cmd->reply_page);
225 cmd->reply_page = 0;
226 }
227out:
228 return ret;
229}
230EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
231
232int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
233{
234 if (cmd->flags & CMD_ASYNC)
235 return iwl_legacy_send_cmd_async(priv, cmd);
236
237 return iwl_legacy_send_cmd_sync(priv, cmd);
238}
239EXPORT_SYMBOL(iwl_legacy_send_cmd);
240
241int
242iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
243{
244 struct iwl_host_cmd cmd = {
245 .id = id,
246 .len = len,
247 .data = data,
248 };
249
250 return iwl_legacy_send_cmd_sync(priv, &cmd);
251}
252EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
253
254int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
255 u8 id, u16 len, const void *data,
256 void (*callback)(struct iwl_priv *priv,
257 struct iwl_device_cmd *cmd,
258 struct iwl_rx_packet *pkt))
259{
260 struct iwl_host_cmd cmd = {
261 .id = id,
262 .len = len,
263 .data = data,
264 };
265
266 cmd.flags |= CMD_ASYNC;
267 cmd.callback = callback;
268
269 return iwl_legacy_send_cmd_async(priv, &cmd);
270}
271EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
deleted file mode 100644
index 5cf23eaecbbb..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-helpers.h
+++ /dev/null
@@ -1,196 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __iwl_legacy_helpers_h__
31#define __iwl_legacy_helpers_h__
32
33#include <linux/ctype.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
37
38#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
39
40
41static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
42 struct ieee80211_hw *hw)
43{
44 return &hw->conf;
45}
46
47/**
48 * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
49 * @index -- current index
50 * @n_bd -- total number of entries in queue (must be power of 2)
51 */
52static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
53{
54 return ++index & (n_bd - 1);
55}
56
57/**
58 * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
59 * @index -- current index
60 * @n_bd -- total number of entries in queue (must be power of 2)
61 */
62static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
63{
64 return --index & (n_bd - 1);
65}
66
67/* TODO: Move fw_desc functions to iwl-pci.ko */
68static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
69 struct fw_desc *desc)
70{
71 if (desc->v_addr)
72 dma_free_coherent(&pci_dev->dev, desc->len,
73 desc->v_addr, desc->p_addr);
74 desc->v_addr = NULL;
75 desc->len = 0;
76}
77
78static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
79 struct fw_desc *desc)
80{
81 if (!desc->len) {
82 desc->v_addr = NULL;
83 return -EINVAL;
84 }
85
86 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
87 &desc->p_addr, GFP_KERNEL);
88 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
89}
90
91/*
92 * we have 8 bits used like this:
93 *
94 * 7 6 5 4 3 2 1 0
95 * | | | | | | | |
96 * | | | | | | +-+-------- AC queue (0-3)
97 * | | | | | |
98 * | +-+-+-+-+------------ HW queue ID
99 * |
100 * +---------------------- unused
101 */
102static inline void
103iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
104{
105 BUG_ON(ac > 3); /* only have 2 bits */
106 BUG_ON(hwq > 31); /* only use 5 bits */
107
108 txq->swq_id = (hwq << 2) | ac;
109}
110
111static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
112 struct iwl_tx_queue *txq)
113{
114 u8 queue = txq->swq_id;
115 u8 ac = queue & 3;
116 u8 hwq = (queue >> 2) & 0x1f;
117
118 if (test_and_clear_bit(hwq, priv->queue_stopped))
119 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
120 ieee80211_wake_queue(priv->hw, ac);
121}
122
123static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
124 struct iwl_tx_queue *txq)
125{
126 u8 queue = txq->swq_id;
127 u8 ac = queue & 3;
128 u8 hwq = (queue >> 2) & 0x1f;
129
130 if (!test_and_set_bit(hwq, priv->queue_stopped))
131 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
132 ieee80211_stop_queue(priv->hw, ac);
133}
134
135#ifdef ieee80211_stop_queue
136#undef ieee80211_stop_queue
137#endif
138
139#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
140
141#ifdef ieee80211_wake_queue
142#undef ieee80211_wake_queue
143#endif
144
145#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
146
147static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
148{
149 clear_bit(STATUS_INT_ENABLED, &priv->status);
150
151 /* disable interrupts from uCode/NIC to host */
152 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
153
154 /* acknowledge/clear/reset any interrupts still pending
155 * from uCode or flow handler (Rx/Tx DMA) */
156 iwl_write32(priv, CSR_INT, 0xffffffff);
157 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
158 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
159}
160
161static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
162{
163 IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
164 iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
165}
166
167static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
168{
169 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
170 set_bit(STATUS_INT_ENABLED, &priv->status);
171 iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
172}
173
174/**
175 * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
176 * @priv -- pointer to iwl_priv data structure
177 * @tsf_bits -- number of bits need to shift for masking)
178 */
179static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
180 u16 tsf_bits)
181{
182 return (1 << tsf_bits) - 1;
183}
184
185/**
186 * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
187 * @priv -- pointer to iwl_priv data structure
188 * @tsf_bits -- number of bits need to shift for masking)
189 */
190static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
191 u16 tsf_bits)
192{
193 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
194}
195
196#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
deleted file mode 100644
index 5cc5d342914f..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-io.h
+++ /dev/null
@@ -1,545 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_io_h__
30#define __iwl_legacy_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-devtrace.h"
37
38/*
39 * IO, register, and NIC memory access functions
40 *
41 * NOTE on naming convention and macro usage for these
42 *
43 * A single _ prefix before a an access function means that no state
44 * check or debug information is printed when that function is called.
45 *
46 * A double __ prefix before an access function means that state is checked
47 * and the current line number and caller function name are printed in addition
48 * to any other debug output.
49 *
50 * The non-prefixed name is the #define that maps the caller into a
51 * #define that provides the caller's name and __LINE__ to the double
52 * prefix version.
53 *
54 * If you wish to call the function without any debug or state checking,
55 * you should use the single _ prefix version (as is used by dependent IO
56 * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
57 * _iwl_legacy_read32.)
58 *
59 * These declarations are *extremely* useful in quickly isolating code deltas
60 * which result in misconfiguration of the hardware I/O. In combination with
61 * git-bisect and the IO debug level you can quickly determine the specific
62 * commit which breaks the IO sequence to the hardware.
63 *
64 */
65
66static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
67{
68 trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
69 iowrite8(val, priv->hw_base + ofs);
70}
71
72#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
73static inline void
74__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
75 u32 ofs, u8 val)
76{
77 IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
78 _iwl_legacy_write8(priv, ofs, val);
79}
80#define iwl_write8(priv, ofs, val) \
81 __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
82#else
83#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
84#endif
85
86
87static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
88{
89 trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
90 iowrite32(val, priv->hw_base + ofs);
91}
92
93#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
94static inline void
95__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
96 u32 ofs, u32 val)
97{
98 IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
99 _iwl_legacy_write32(priv, ofs, val);
100}
101#define iwl_write32(priv, ofs, val) \
102 __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
103#else
104#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
105#endif
106
107static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
108{
109 u32 val = ioread32(priv->hw_base + ofs);
110 trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
111 return val;
112}
113
114#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
115static inline u32
116__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
117{
118 IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
119 return _iwl_legacy_read32(priv, ofs);
120}
121#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
122#else
123#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
124#endif
125
126#define IWL_POLL_INTERVAL 10 /* microseconds */
127static inline int
128_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
129 u32 bits, u32 mask, int timeout)
130{
131 int t = 0;
132
133 do {
134 if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
135 return t;
136 udelay(IWL_POLL_INTERVAL);
137 t += IWL_POLL_INTERVAL;
138 } while (t < timeout);
139
140 return -ETIMEDOUT;
141}
142#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
143static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
144 struct iwl_priv *priv, u32 addr,
145 u32 bits, u32 mask, int timeout)
146{
147 int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
148 IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
149 addr, bits, mask,
150 unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
151 return ret;
152}
153#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
154 __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
155 bits, mask, timeout)
156#else
157#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
158#endif
159
160static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
161{
162 _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
163}
164#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
165static inline void __iwl_legacy_set_bit(const char *f, u32 l,
166 struct iwl_priv *priv, u32 reg, u32 mask)
167{
168 u32 val = _iwl_legacy_read32(priv, reg) | mask;
169 IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
170 mask, val);
171 _iwl_legacy_write32(priv, reg, val);
172}
173static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
174{
175 unsigned long reg_flags;
176
177 spin_lock_irqsave(&p->reg_lock, reg_flags);
178 __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
179 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
180}
181#else
182static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
183{
184 unsigned long reg_flags;
185
186 spin_lock_irqsave(&p->reg_lock, reg_flags);
187 _iwl_legacy_set_bit(p, r, m);
188 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
189}
190#endif
191
192static inline void
193_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
194{
195 _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
196}
197#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
198static inline void
199__iwl_legacy_clear_bit(const char *f, u32 l,
200 struct iwl_priv *priv, u32 reg, u32 mask)
201{
202 u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
203 IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
204 _iwl_legacy_write32(priv, reg, val);
205}
206static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
207{
208 unsigned long reg_flags;
209
210 spin_lock_irqsave(&p->reg_lock, reg_flags);
211 __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
212 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
213}
214#else
215static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
216{
217 unsigned long reg_flags;
218
219 spin_lock_irqsave(&p->reg_lock, reg_flags);
220 _iwl_legacy_clear_bit(p, r, m);
221 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
222}
223#endif
224
225static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
226{
227 int ret;
228 u32 val;
229
230 /* this bit wakes up the NIC */
231 _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
232 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
233
234 /*
235 * These bits say the device is running, and should keep running for
236 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
237 * but they do not indicate that embedded SRAM is restored yet;
238 * 3945 and 4965 have volatile SRAM, and must save/restore contents
239 * to/from host DRAM when sleeping/waking for power-saving.
240 * Each direction takes approximately 1/4 millisecond; with this
241 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
242 * series of register accesses are expected (e.g. reading Event Log),
243 * to keep device from sleeping.
244 *
245 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
246 * SRAM is okay/restored. We don't check that here because this call
247 * is just for hardware register access; but GP1 MAC_SLEEP check is a
248 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
249 *
250 */
251 ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
252 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
253 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
254 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
255 if (ret < 0) {
256 val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
257 IWL_ERR(priv,
258 "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
259 _iwl_legacy_write32(priv, CSR_RESET,
260 CSR_RESET_REG_FLAG_FORCE_NMI);
261 return -EIO;
262 }
263
264 return 0;
265}
266
267#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
268static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
269 struct iwl_priv *priv)
270{
271 IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
272 return _iwl_legacy_grab_nic_access(priv);
273}
274#define iwl_grab_nic_access(priv) \
275 __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
276#else
277#define iwl_grab_nic_access(priv) \
278 _iwl_legacy_grab_nic_access(priv)
279#endif
280
281static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
282{
283 _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
284 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
285}
286#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
287static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
288 struct iwl_priv *priv)
289{
290
291 IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
292 _iwl_legacy_release_nic_access(priv);
293}
294#define iwl_release_nic_access(priv) \
295 __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
296#else
297#define iwl_release_nic_access(priv) \
298 _iwl_legacy_release_nic_access(priv)
299#endif
300
301static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
302{
303 return _iwl_legacy_read32(priv, reg);
304}
305#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
306static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
307 struct iwl_priv *priv, u32 reg)
308{
309 u32 value = _iwl_legacy_read_direct32(priv, reg);
310 IWL_DEBUG_IO(priv,
311 "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
312 f, l);
313 return value;
314}
315static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
316{
317 u32 value;
318 unsigned long reg_flags;
319
320 spin_lock_irqsave(&priv->reg_lock, reg_flags);
321 iwl_grab_nic_access(priv);
322 value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
323 iwl_release_nic_access(priv);
324 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
325 return value;
326}
327
328#else
329static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
330{
331 u32 value;
332 unsigned long reg_flags;
333
334 spin_lock_irqsave(&priv->reg_lock, reg_flags);
335 iwl_grab_nic_access(priv);
336 value = _iwl_legacy_read_direct32(priv, reg);
337 iwl_release_nic_access(priv);
338 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
339 return value;
340
341}
342#endif
343
344static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
345 u32 reg, u32 value)
346{
347 _iwl_legacy_write32(priv, reg, value);
348}
349static inline void
350iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
351{
352 unsigned long reg_flags;
353
354 spin_lock_irqsave(&priv->reg_lock, reg_flags);
355 if (!iwl_grab_nic_access(priv)) {
356 _iwl_legacy_write_direct32(priv, reg, value);
357 iwl_release_nic_access(priv);
358 }
359 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
360}
361
362static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
363 u32 reg, u32 len, u32 *values)
364{
365 u32 count = sizeof(u32);
366
367 if ((priv != NULL) && (values != NULL)) {
368 for (; 0 < len; len -= count, reg += count, values++)
369 iwl_legacy_write_direct32(priv, reg, *values);
370 }
371}
372
373static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
374 u32 mask, int timeout)
375{
376 int t = 0;
377
378 do {
379 if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
380 return t;
381 udelay(IWL_POLL_INTERVAL);
382 t += IWL_POLL_INTERVAL;
383 } while (t < timeout);
384
385 return -ETIMEDOUT;
386}
387
388#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
389static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
390 struct iwl_priv *priv,
391 u32 addr, u32 mask, int timeout)
392{
393 int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
394
395 if (unlikely(ret == -ETIMEDOUT))
396 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
397 "timedout - %s %d\n", addr, mask, f, l);
398 else
399 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
400 "- %s %d\n", addr, mask, ret, f, l);
401 return ret;
402}
403#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
404__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
405#else
406#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
407#endif
408
409static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
410{
411 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
412 rmb();
413 return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
414}
415static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
416{
417 unsigned long reg_flags;
418 u32 val;
419
420 spin_lock_irqsave(&priv->reg_lock, reg_flags);
421 iwl_grab_nic_access(priv);
422 val = _iwl_legacy_read_prph(priv, reg);
423 iwl_release_nic_access(priv);
424 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
425 return val;
426}
427
428static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
429 u32 addr, u32 val)
430{
431 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
432 ((addr & 0x0000FFFF) | (3 << 24)));
433 wmb();
434 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
435}
436
437static inline void
438iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
439{
440 unsigned long reg_flags;
441
442 spin_lock_irqsave(&priv->reg_lock, reg_flags);
443 if (!iwl_grab_nic_access(priv)) {
444 _iwl_legacy_write_prph(priv, addr, val);
445 iwl_release_nic_access(priv);
446 }
447 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
448}
449
450#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
451_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
452
453static inline void
454iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
455{
456 unsigned long reg_flags;
457
458 spin_lock_irqsave(&priv->reg_lock, reg_flags);
459 iwl_grab_nic_access(priv);
460 _iwl_legacy_set_bits_prph(priv, reg, mask);
461 iwl_release_nic_access(priv);
462 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
463}
464
465#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
466_iwl_legacy_write_prph(priv, reg, \
467 ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
468
469static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
470 u32 bits, u32 mask)
471{
472 unsigned long reg_flags;
473
474 spin_lock_irqsave(&priv->reg_lock, reg_flags);
475 iwl_grab_nic_access(priv);
476 _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
477 iwl_release_nic_access(priv);
478 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
479}
480
481static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
482 *priv, u32 reg, u32 mask)
483{
484 unsigned long reg_flags;
485 u32 val;
486
487 spin_lock_irqsave(&priv->reg_lock, reg_flags);
488 iwl_grab_nic_access(priv);
489 val = _iwl_legacy_read_prph(priv, reg);
490 _iwl_legacy_write_prph(priv, reg, (val & ~mask));
491 iwl_release_nic_access(priv);
492 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
493}
494
495static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
496{
497 unsigned long reg_flags;
498 u32 value;
499
500 spin_lock_irqsave(&priv->reg_lock, reg_flags);
501 iwl_grab_nic_access(priv);
502
503 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
504 rmb();
505 value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
506
507 iwl_release_nic_access(priv);
508 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
509 return value;
510}
511
512static inline void
513iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
514{
515 unsigned long reg_flags;
516
517 spin_lock_irqsave(&priv->reg_lock, reg_flags);
518 if (!iwl_grab_nic_access(priv)) {
519 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
520 wmb();
521 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
522 iwl_release_nic_access(priv);
523 }
524 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
525}
526
527static inline void
528iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
529 u32 len, u32 *values)
530{
531 unsigned long reg_flags;
532
533 spin_lock_irqsave(&priv->reg_lock, reg_flags);
534 if (!iwl_grab_nic_access(priv)) {
535 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
536 wmb();
537 for (; 0 < len; len -= sizeof(u32), values++)
538 _iwl_legacy_write_direct32(priv,
539 HBUS_TARG_MEM_WDAT, *values);
540
541 iwl_release_nic_access(priv);
542 }
543 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
544}
545#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
deleted file mode 100644
index dc568a474c5d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.c
+++ /dev/null
@@ -1,205 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-dev.h"
41#include "iwl-core.h"
42#include "iwl-io.h"
43
44/* default: IWL_LED_BLINK(0) using blinking index table */
45static int led_mode;
46module_param(led_mode, int, S_IRUGO);
47MODULE_PARM_DESC(led_mode, "0=system default, "
48 "1=On(RF On)/Off(RF Off), 2=blinking");
49
50/* Throughput OFF time(ms) ON time (ms)
51 * >300 25 25
52 * >200 to 300 40 40
53 * >100 to 200 55 55
54 * >70 to 100 65 65
55 * >50 to 70 75 75
56 * >20 to 50 85 85
57 * >10 to 20 95 95
58 * >5 to 10 110 110
59 * >1 to 5 130 130
60 * >0 to 1 167 167
61 * <=0 SOLID ON
62 */
63static const struct ieee80211_tpt_blink iwl_blink[] = {
64 { .throughput = 0, .blink_time = 334 },
65 { .throughput = 1 * 1024 - 1, .blink_time = 260 },
66 { .throughput = 5 * 1024 - 1, .blink_time = 220 },
67 { .throughput = 10 * 1024 - 1, .blink_time = 190 },
68 { .throughput = 20 * 1024 - 1, .blink_time = 170 },
69 { .throughput = 50 * 1024 - 1, .blink_time = 150 },
70 { .throughput = 70 * 1024 - 1, .blink_time = 130 },
71 { .throughput = 100 * 1024 - 1, .blink_time = 110 },
72 { .throughput = 200 * 1024 - 1, .blink_time = 80 },
73 { .throughput = 300 * 1024 - 1, .blink_time = 50 },
74};
75
76/*
77 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
78 * Led blink rate analysis showed an average deviation of 0% on 3945,
79 * 5% on 4965 HW.
80 * Need to compensate on the led on/off time per HW according to the deviation
81 * to achieve the desired led frequency
82 * The calculation is: (100-averageDeviation)/100 * blinkTime
83 * For code efficiency the calculation will be:
84 * compensation = (100 - averageDeviation) * 64 / 100
85 * NewBlinkTime = (compensation * BlinkTime) / 64
86 */
87static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
88 u8 time, u16 compensation)
89{
90 if (!compensation) {
91 IWL_ERR(priv, "undefined blink compensation: "
92 "use pre-defined blinking time\n");
93 return time;
94 }
95
96 return (u8)((time * compensation) >> 6);
97}
98
99/* Set led pattern command */
100static int iwl_legacy_led_cmd(struct iwl_priv *priv,
101 unsigned long on,
102 unsigned long off)
103{
104 struct iwl_led_cmd led_cmd = {
105 .id = IWL_LED_LINK,
106 .interval = IWL_DEF_LED_INTRVL
107 };
108 int ret;
109
110 if (!test_bit(STATUS_READY, &priv->status))
111 return -EBUSY;
112
113 if (priv->blink_on == on && priv->blink_off == off)
114 return 0;
115
116 if (off == 0) {
117 /* led is SOLID_ON */
118 on = IWL_LED_SOLID;
119 }
120
121 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
122 priv->cfg->base_params->led_compensation);
123 led_cmd.on = iwl_legacy_blink_compensation(priv, on,
124 priv->cfg->base_params->led_compensation);
125 led_cmd.off = iwl_legacy_blink_compensation(priv, off,
126 priv->cfg->base_params->led_compensation);
127
128 ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
129 if (!ret) {
130 priv->blink_on = on;
131 priv->blink_off = off;
132 }
133 return ret;
134}
135
136static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
137 enum led_brightness brightness)
138{
139 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
140 unsigned long on = 0;
141
142 if (brightness > 0)
143 on = IWL_LED_SOLID;
144
145 iwl_legacy_led_cmd(priv, on, 0);
146}
147
148static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
149 unsigned long *delay_on,
150 unsigned long *delay_off)
151{
152 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
153
154 return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
155}
156
157void iwl_legacy_leds_init(struct iwl_priv *priv)
158{
159 int mode = led_mode;
160 int ret;
161
162 if (mode == IWL_LED_DEFAULT)
163 mode = priv->cfg->led_mode;
164
165 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
166 wiphy_name(priv->hw->wiphy));
167 priv->led.brightness_set = iwl_legacy_led_brightness_set;
168 priv->led.blink_set = iwl_legacy_led_blink_set;
169 priv->led.max_brightness = 1;
170
171 switch (mode) {
172 case IWL_LED_DEFAULT:
173 WARN_ON(1);
174 break;
175 case IWL_LED_BLINK:
176 priv->led.default_trigger =
177 ieee80211_create_tpt_led_trigger(priv->hw,
178 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
179 iwl_blink, ARRAY_SIZE(iwl_blink));
180 break;
181 case IWL_LED_RF_STATE:
182 priv->led.default_trigger =
183 ieee80211_get_radio_led_name(priv->hw);
184 break;
185 }
186
187 ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
188 if (ret) {
189 kfree(priv->led.name);
190 return;
191 }
192
193 priv->led_registered = true;
194}
195EXPORT_SYMBOL(iwl_legacy_leds_init);
196
197void iwl_legacy_leds_exit(struct iwl_priv *priv)
198{
199 if (!priv->led_registered)
200 return;
201
202 led_classdev_unregister(&priv->led);
203 kfree(priv->led.name);
204}
205EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
deleted file mode 100644
index f0791f70f79d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_leds_h__
28#define __iwl_legacy_leds_h__
29
30
31struct iwl_priv;
32
33#define IWL_LED_SOLID 11
34#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
35
36#define IWL_LED_ACTIVITY (0<<1)
37#define IWL_LED_LINK (1<<1)
38
39/*
40 * LED mode
41 * IWL_LED_DEFAULT: use device default
42 * IWL_LED_RF_STATE: turn LED on/off based on RF state
43 * LED ON = RF ON
44 * LED OFF = RF OFF
45 * IWL_LED_BLINK: adjust led blink rate based on blink table
46 */
47enum iwl_led_mode {
48 IWL_LED_DEFAULT,
49 IWL_LED_RF_STATE,
50 IWL_LED_BLINK,
51};
52
53void iwl_legacy_leds_init(struct iwl_priv *priv);
54void iwl_legacy_leds_exit(struct iwl_priv *priv);
55
56#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
deleted file mode 100644
index 38647e481eb0..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
+++ /dev/null
@@ -1,456 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_rs_h__
28#define __iwl_legacy_rs_h__
29
30struct iwl_rate_info {
31 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
32 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
33 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
34 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
35 u8 prev_ieee; /* previous rate in IEEE speeds */
36 u8 next_ieee; /* next rate in IEEE speeds */
37 u8 prev_rs; /* previous rate used in rs algo */
38 u8 next_rs; /* next rate used in rs algo */
39 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
40 u8 next_rs_tgg; /* next rate used in TGG rs algo */
41};
42
43struct iwl3945_rate_info {
44 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
45 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
46 u8 prev_ieee; /* previous rate in IEEE speeds */
47 u8 next_ieee; /* next rate in IEEE speeds */
48 u8 prev_rs; /* previous rate used in rs algo */
49 u8 next_rs; /* next rate used in rs algo */
50 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
51 u8 next_rs_tgg; /* next rate used in TGG rs algo */
52 u8 table_rs_index; /* index in rate scale table cmd */
53 u8 prev_table_rs; /* prev in rate table cmd */
54};
55
56
57/*
58 * These serve as indexes into
59 * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
60 */
61enum {
62 IWL_RATE_1M_INDEX = 0,
63 IWL_RATE_2M_INDEX,
64 IWL_RATE_5M_INDEX,
65 IWL_RATE_11M_INDEX,
66 IWL_RATE_6M_INDEX,
67 IWL_RATE_9M_INDEX,
68 IWL_RATE_12M_INDEX,
69 IWL_RATE_18M_INDEX,
70 IWL_RATE_24M_INDEX,
71 IWL_RATE_36M_INDEX,
72 IWL_RATE_48M_INDEX,
73 IWL_RATE_54M_INDEX,
74 IWL_RATE_60M_INDEX,
75 IWL_RATE_COUNT,
76 IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
77 IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
78 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
79 IWL_RATE_INVALID = IWL_RATE_COUNT,
80};
81
82enum {
83 IWL_RATE_6M_INDEX_TABLE = 0,
84 IWL_RATE_9M_INDEX_TABLE,
85 IWL_RATE_12M_INDEX_TABLE,
86 IWL_RATE_18M_INDEX_TABLE,
87 IWL_RATE_24M_INDEX_TABLE,
88 IWL_RATE_36M_INDEX_TABLE,
89 IWL_RATE_48M_INDEX_TABLE,
90 IWL_RATE_54M_INDEX_TABLE,
91 IWL_RATE_1M_INDEX_TABLE,
92 IWL_RATE_2M_INDEX_TABLE,
93 IWL_RATE_5M_INDEX_TABLE,
94 IWL_RATE_11M_INDEX_TABLE,
95 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
96};
97
98enum {
99 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
100 IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
101 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
102 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
103 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
104};
105
106/* #define vs. enum to keep from defaulting to 'large integer' */
107#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
108#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
109#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
110#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
111#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
112#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
113#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
114#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
115#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
116#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
117#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
118#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
119#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
120
121/* uCode API values for legacy bit rates, both OFDM and CCK */
122enum {
123 IWL_RATE_6M_PLCP = 13,
124 IWL_RATE_9M_PLCP = 15,
125 IWL_RATE_12M_PLCP = 5,
126 IWL_RATE_18M_PLCP = 7,
127 IWL_RATE_24M_PLCP = 9,
128 IWL_RATE_36M_PLCP = 11,
129 IWL_RATE_48M_PLCP = 1,
130 IWL_RATE_54M_PLCP = 3,
131 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
132 IWL_RATE_1M_PLCP = 10,
133 IWL_RATE_2M_PLCP = 20,
134 IWL_RATE_5M_PLCP = 55,
135 IWL_RATE_11M_PLCP = 110,
136 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
137};
138
139/* uCode API values for OFDM high-throughput (HT) bit rates */
140enum {
141 IWL_RATE_SISO_6M_PLCP = 0,
142 IWL_RATE_SISO_12M_PLCP = 1,
143 IWL_RATE_SISO_18M_PLCP = 2,
144 IWL_RATE_SISO_24M_PLCP = 3,
145 IWL_RATE_SISO_36M_PLCP = 4,
146 IWL_RATE_SISO_48M_PLCP = 5,
147 IWL_RATE_SISO_54M_PLCP = 6,
148 IWL_RATE_SISO_60M_PLCP = 7,
149 IWL_RATE_MIMO2_6M_PLCP = 0x8,
150 IWL_RATE_MIMO2_12M_PLCP = 0x9,
151 IWL_RATE_MIMO2_18M_PLCP = 0xa,
152 IWL_RATE_MIMO2_24M_PLCP = 0xb,
153 IWL_RATE_MIMO2_36M_PLCP = 0xc,
154 IWL_RATE_MIMO2_48M_PLCP = 0xd,
155 IWL_RATE_MIMO2_54M_PLCP = 0xe,
156 IWL_RATE_MIMO2_60M_PLCP = 0xf,
157 IWL_RATE_SISO_INVM_PLCP,
158 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
159};
160
161/* MAC header values for bit rates */
162enum {
163 IWL_RATE_6M_IEEE = 12,
164 IWL_RATE_9M_IEEE = 18,
165 IWL_RATE_12M_IEEE = 24,
166 IWL_RATE_18M_IEEE = 36,
167 IWL_RATE_24M_IEEE = 48,
168 IWL_RATE_36M_IEEE = 72,
169 IWL_RATE_48M_IEEE = 96,
170 IWL_RATE_54M_IEEE = 108,
171 IWL_RATE_60M_IEEE = 120,
172 IWL_RATE_1M_IEEE = 2,
173 IWL_RATE_2M_IEEE = 4,
174 IWL_RATE_5M_IEEE = 11,
175 IWL_RATE_11M_IEEE = 22,
176};
177
178#define IWL_CCK_BASIC_RATES_MASK \
179 (IWL_RATE_1M_MASK | \
180 IWL_RATE_2M_MASK)
181
182#define IWL_CCK_RATES_MASK \
183 (IWL_CCK_BASIC_RATES_MASK | \
184 IWL_RATE_5M_MASK | \
185 IWL_RATE_11M_MASK)
186
187#define IWL_OFDM_BASIC_RATES_MASK \
188 (IWL_RATE_6M_MASK | \
189 IWL_RATE_12M_MASK | \
190 IWL_RATE_24M_MASK)
191
192#define IWL_OFDM_RATES_MASK \
193 (IWL_OFDM_BASIC_RATES_MASK | \
194 IWL_RATE_9M_MASK | \
195 IWL_RATE_18M_MASK | \
196 IWL_RATE_36M_MASK | \
197 IWL_RATE_48M_MASK | \
198 IWL_RATE_54M_MASK)
199
200#define IWL_BASIC_RATES_MASK \
201 (IWL_OFDM_BASIC_RATES_MASK | \
202 IWL_CCK_BASIC_RATES_MASK)
203
204#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
205#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
206
207#define IWL_INVALID_VALUE -1
208
209#define IWL_MIN_RSSI_VAL -100
210#define IWL_MAX_RSSI_VAL 0
211
212/* These values specify how many Tx frame attempts before
213 * searching for a new modulation mode */
214#define IWL_LEGACY_FAILURE_LIMIT 160
215#define IWL_LEGACY_SUCCESS_LIMIT 480
216#define IWL_LEGACY_TABLE_COUNT 160
217
218#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
219#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
220#define IWL_NONE_LEGACY_TABLE_COUNT 1500
221
222/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
223#define IWL_RS_GOOD_RATIO 12800 /* 100% */
224#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
225#define IWL_RATE_HIGH_TH 10880 /* 85% */
226#define IWL_RATE_INCREASE_TH 6400 /* 50% */
227#define IWL_RATE_DECREASE_TH 1920 /* 15% */
228
229/* possible actions when in legacy mode */
230#define IWL_LEGACY_SWITCH_ANTENNA1 0
231#define IWL_LEGACY_SWITCH_ANTENNA2 1
232#define IWL_LEGACY_SWITCH_SISO 2
233#define IWL_LEGACY_SWITCH_MIMO2_AB 3
234#define IWL_LEGACY_SWITCH_MIMO2_AC 4
235#define IWL_LEGACY_SWITCH_MIMO2_BC 5
236
237/* possible actions when in siso mode */
238#define IWL_SISO_SWITCH_ANTENNA1 0
239#define IWL_SISO_SWITCH_ANTENNA2 1
240#define IWL_SISO_SWITCH_MIMO2_AB 2
241#define IWL_SISO_SWITCH_MIMO2_AC 3
242#define IWL_SISO_SWITCH_MIMO2_BC 4
243#define IWL_SISO_SWITCH_GI 5
244
245/* possible actions when in mimo mode */
246#define IWL_MIMO2_SWITCH_ANTENNA1 0
247#define IWL_MIMO2_SWITCH_ANTENNA2 1
248#define IWL_MIMO2_SWITCH_SISO_A 2
249#define IWL_MIMO2_SWITCH_SISO_B 3
250#define IWL_MIMO2_SWITCH_SISO_C 4
251#define IWL_MIMO2_SWITCH_GI 5
252
253#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
254
255#define IWL_ACTION_LIMIT 3 /* # possible actions */
256
257#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
258
259/* load per tid defines for A-MPDU activation */
260#define IWL_AGG_TPT_THREHOLD 0
261#define IWL_AGG_LOAD_THRESHOLD 10
262#define IWL_AGG_ALL_TID 0xff
263#define TID_QUEUE_CELL_SPACING 50 /*mS */
264#define TID_QUEUE_MAX_SIZE 20
265#define TID_ROUND_VALUE 5 /* mS */
266#define TID_MAX_LOAD_COUNT 8
267
268#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
269#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
270
271extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
272
273enum iwl_table_type {
274 LQ_NONE,
275 LQ_G, /* legacy types */
276 LQ_A,
277 LQ_SISO, /* high-throughput types */
278 LQ_MIMO2,
279 LQ_MAX,
280};
281
282#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
283#define is_siso(tbl) ((tbl) == LQ_SISO)
284#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
285#define is_mimo(tbl) (is_mimo2(tbl))
286#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
287#define is_a_band(tbl) ((tbl) == LQ_A)
288#define is_g_and(tbl) ((tbl) == LQ_G)
289
290#define ANT_NONE 0x0
291#define ANT_A BIT(0)
292#define ANT_B BIT(1)
293#define ANT_AB (ANT_A | ANT_B)
294#define ANT_C BIT(2)
295#define ANT_AC (ANT_A | ANT_C)
296#define ANT_BC (ANT_B | ANT_C)
297#define ANT_ABC (ANT_AB | ANT_C)
298
299#define IWL_MAX_MCS_DISPLAY_SIZE 12
300
301struct iwl_rate_mcs_info {
302 char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
303 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
304};
305
306/**
307 * struct iwl_rate_scale_data -- tx success history for one rate
308 */
309struct iwl_rate_scale_data {
310 u64 data; /* bitmap of successful frames */
311 s32 success_counter; /* number of frames successful */
312 s32 success_ratio; /* per-cent * 128 */
313 s32 counter; /* number of frames attempted */
314 s32 average_tpt; /* success ratio * expected throughput */
315 unsigned long stamp;
316};
317
318/**
319 * struct iwl_scale_tbl_info -- tx params and success history for all rates
320 *
321 * There are two of these in struct iwl_lq_sta,
322 * one for "active", and one for "search".
323 */
324struct iwl_scale_tbl_info {
325 enum iwl_table_type lq_type;
326 u8 ant_type;
327 u8 is_SGI; /* 1 = short guard interval */
328 u8 is_ht40; /* 1 = 40 MHz channel width */
329 u8 is_dup; /* 1 = duplicated data streams */
330 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
331 u8 max_search; /* maximun number of tables we can search */
332 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
333 u32 current_rate; /* rate_n_flags, uCode API format */
334 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
335};
336
337struct iwl_traffic_load {
338 unsigned long time_stamp; /* age of the oldest statistics */
339 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
340 * slice */
341 u32 total; /* total num of packets during the
342 * last TID_MAX_TIME_DIFF */
343 u8 queue_count; /* number of queues that has
344 * been used since the last cleanup */
345 u8 head; /* start of the circular buffer */
346};
347
348/**
349 * struct iwl_lq_sta -- driver's rate scaling private structure
350 *
351 * Pointer to this gets passed back and forth between driver and mac80211.
352 */
353struct iwl_lq_sta {
354 u8 active_tbl; /* index of active table, range 0-1 */
355 u8 enable_counter; /* indicates HT mode */
356 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
357 u8 search_better_tbl; /* 1: currently trying alternate mode */
358 s32 last_tpt;
359
360 /* The following determine when to search for a new mode */
361 u32 table_count_limit;
362 u32 max_failure_limit; /* # failed frames before new search */
363 u32 max_success_limit; /* # successful frames before new search */
364 u32 table_count;
365 u32 total_failed; /* total failed frames, any/all rates */
366 u32 total_success; /* total successful frames, any/all rates */
367 u64 flush_timer; /* time staying in mode before new search */
368
369 u8 action_counter; /* # mode-switch actions tried */
370 u8 is_green;
371 u8 is_dup;
372 enum ieee80211_band band;
373
374 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
375 u32 supp_rates;
376 u16 active_legacy_rate;
377 u16 active_siso_rate;
378 u16 active_mimo2_rate;
379 s8 max_rate_idx; /* Max rate set by user */
380 u8 missed_rate_counter;
381
382 struct iwl_link_quality_cmd lq;
383 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
384 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
385 u8 tx_agg_tid_en;
386#ifdef CONFIG_MAC80211_DEBUGFS
387 struct dentry *rs_sta_dbgfs_scale_table_file;
388 struct dentry *rs_sta_dbgfs_stats_table_file;
389 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
390 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
391 u32 dbg_fixed_rate;
392#endif
393 struct iwl_priv *drv;
394
395 /* used to be in sta_info */
396 int last_txrate_idx;
397 /* last tx rate_n_flags */
398 u32 last_rate_n_flags;
399 /* packets destined for this STA are aggregated */
400 u8 is_agg;
401};
402
403static inline u8 iwl4965_num_of_ant(u8 mask)
404{
405 return !!((mask) & ANT_A) +
406 !!((mask) & ANT_B) +
407 !!((mask) & ANT_C);
408}
409
410static inline u8 iwl4965_first_antenna(u8 mask)
411{
412 if (mask & ANT_A)
413 return ANT_A;
414 if (mask & ANT_B)
415 return ANT_B;
416 return ANT_C;
417}
418
419
420/**
421 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
422 *
423 * The specific throughput table used is based on the type of network
424 * the associated with, including A, B, G, and G w/ TGG protection
425 */
426extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
427
428/* Initialize station's rate scaling information after adding station */
429extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
430 struct ieee80211_sta *sta, u8 sta_id);
431extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
432 struct ieee80211_sta *sta, u8 sta_id);
433
434/**
435 * iwl_rate_control_register - Register the rate control algorithm callbacks
436 *
437 * Since the rate control algorithm is hardware specific, there is no need
438 * or reason to place it as a stand alone module. The driver can call
439 * iwl_rate_control_register in order to register the rate control callbacks
440 * with the mac80211 subsystem. This should be performed prior to calling
441 * ieee80211_register_hw
442 *
443 */
444extern int iwl4965_rate_control_register(void);
445extern int iwl3945_rate_control_register(void);
446
447/**
448 * iwl_rate_control_unregister - Unregister the rate control callbacks
449 *
450 * This should be called after calling ieee80211_unregister_hw, but before
451 * the driver is unloaded.
452 */
453extern void iwl4965_rate_control_unregister(void);
454extern void iwl3945_rate_control_unregister(void);
455
456#endif /* __iwl_legacy_rs__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
deleted file mode 100644
index 903ef0d6d6cb..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.c
+++ /dev/null
@@ -1,165 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-power.h"
44
45/*
46 * Setting power level allows the card to go to sleep when not busy.
47 *
48 * We calculate a sleep command based on the required latency, which
49 * we get from mac80211. In order to handle thermal throttling, we can
50 * also use pre-defined power levels.
51 */
52
53/*
54 * This defines the old power levels. They are still used by default
55 * (level 1) and for thermal throttle (levels 3 through 5)
56 */
57
58struct iwl_power_vec_entry {
59 struct iwl_powertable_cmd cmd;
60 u8 no_dtim; /* number of skip dtim */
61};
62
63static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
64 struct iwl_powertable_cmd *cmd)
65{
66 memset(cmd, 0, sizeof(*cmd));
67
68 if (priv->power_data.pci_pm)
69 cmd->flags |= IWL_POWER_PCI_PM_MSK;
70
71 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
72}
73
74static int
75iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
76{
77 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
78 IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
79 IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
80 le32_to_cpu(cmd->tx_data_timeout));
81 IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
82 le32_to_cpu(cmd->rx_data_timeout));
83 IWL_DEBUG_POWER(priv,
84 "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
85 le32_to_cpu(cmd->sleep_interval[0]),
86 le32_to_cpu(cmd->sleep_interval[1]),
87 le32_to_cpu(cmd->sleep_interval[2]),
88 le32_to_cpu(cmd->sleep_interval[3]),
89 le32_to_cpu(cmd->sleep_interval[4]));
90
91 return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
92 sizeof(struct iwl_powertable_cmd), cmd);
93}
94
95int
96iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
97 bool force)
98{
99 int ret;
100 bool update_chains;
101
102 lockdep_assert_held(&priv->mutex);
103
104 /* Don't update the RX chain when chain noise calibration is running */
105 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
106 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
107
108 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
109 return 0;
110
111 if (!iwl_legacy_is_ready_rf(priv))
112 return -EIO;
113
114 /* scan complete use sleep_power_next, need to be updated */
115 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
116 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
117 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
118 return 0;
119 }
120
121 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
122 set_bit(STATUS_POWER_PMI, &priv->status);
123
124 ret = iwl_legacy_set_power(priv, cmd);
125 if (!ret) {
126 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
127 clear_bit(STATUS_POWER_PMI, &priv->status);
128
129 if (priv->cfg->ops->lib->update_chain_flags && update_chains)
130 priv->cfg->ops->lib->update_chain_flags(priv);
131 else if (priv->cfg->ops->lib->update_chain_flags)
132 IWL_DEBUG_POWER(priv,
133 "Cannot update the power, chain noise "
134 "calibration running: %d\n",
135 priv->chain_noise_data.state);
136
137 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
138 } else
139 IWL_ERR(priv, "set power fail, ret = %d", ret);
140
141 return ret;
142}
143
144int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
145{
146 struct iwl_powertable_cmd cmd;
147
148 iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
149 return iwl_legacy_power_set_mode(priv, &cmd, force);
150}
151EXPORT_SYMBOL(iwl_legacy_power_update_mode);
152
153/* initialize to default */
154void iwl_legacy_power_initialize(struct iwl_priv *priv)
155{
156 u16 lctl = iwl_legacy_pcie_link_ctl(priv);
157
158 priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
159
160 priv->power_data.debug_sleep_level_override = -1;
161
162 memset(&priv->power_data.sleep_cmd, 0,
163 sizeof(priv->power_data.sleep_cmd));
164}
165EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
deleted file mode 100644
index d30b36acdc4a..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.h
+++ /dev/null
@@ -1,55 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_legacy_power_setting_h__
29#define __iwl_legacy_power_setting_h__
30
31#include "iwl-commands.h"
32
33enum iwl_power_level {
34 IWL_POWER_INDEX_1,
35 IWL_POWER_INDEX_2,
36 IWL_POWER_INDEX_3,
37 IWL_POWER_INDEX_4,
38 IWL_POWER_INDEX_5,
39 IWL_POWER_NUM
40};
41
42struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next;
45 int debug_sleep_level_override;
46 bool pci_pm;
47};
48
49int
50iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
51 bool force);
52int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
53void iwl_legacy_power_initialize(struct iwl_priv *priv);
54
55#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
deleted file mode 100644
index 9b5d0abe8be9..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-rx.c
+++ /dev/null
@@ -1,281 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <net/mac80211.h>
33#include <asm/unaligned.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40/************************** RX-FUNCTIONS ****************************/
41/*
42 * Rx theory of operation
43 *
44 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
45 * each of which point to Receive Buffers to be filled by the NIC. These get
46 * used not only for Rx frames, but for any command response or notification
47 * from the NIC. The driver and NIC manage the Rx buffers by means
48 * of indexes into the circular buffer.
49 *
50 * Rx Queue Indexes
51 * The host/firmware share two index registers for managing the Rx buffers.
52 *
53 * The READ index maps to the first position that the firmware may be writing
54 * to -- the driver can read up to (but not including) this position and get
55 * good data.
56 * The READ index is managed by the firmware once the card is enabled.
57 *
58 * The WRITE index maps to the last position the driver has read from -- the
59 * position preceding WRITE is the last slot the firmware can place a packet.
60 *
61 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
62 * WRITE = READ.
63 *
64 * During initialization, the host sets up the READ queue position to the first
65 * INDEX position, and WRITE to the last (READ - 1 wrapped)
66 *
67 * When the firmware places a packet in a buffer, it will advance the READ index
68 * and fire the RX interrupt. The driver can then query the READ index and
69 * process as many packets as possible, moving the WRITE index forward as it
70 * resets the Rx queue buffers with new memory.
71 *
72 * The management in the driver is as follows:
73 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
74 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
75 * to replenish the iwl->rxq->rx_free.
76 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
77 * iwl->rxq is replenished and the READ INDEX is updated (updating the
78 * 'processed' and 'read' driver indexes as well)
79 * + A received packet is processed and handed to the kernel network stack,
80 * detached from the iwl->rxq. The driver 'processed' index is updated.
81 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
82 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
83 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
84 * were enough free buffers and RX_STALLED is set it is cleared.
85 *
86 *
87 * Driver sequence:
88 *
89 * iwl_legacy_rx_queue_alloc() Allocates rx_free
90 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
91 * iwl_rx_queue_restock
92 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
93 * queue, updates firmware pointers, and updates
94 * the WRITE index. If insufficient rx_free buffers
95 * are available, schedules iwl_rx_replenish
96 *
97 * -- enable interrupts --
98 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
99 * READ INDEX, detaching the SKB from the pool.
100 * Moves the packet buffer from queue to rx_used.
101 * Calls iwl_rx_queue_restock to refill any empty
102 * slots.
103 * ...
104 *
105 */
106
107/**
108 * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
109 */
110int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
111{
112 int s = q->read - q->write;
113 if (s <= 0)
114 s += RX_QUEUE_SIZE;
115 /* keep some buffer to not confuse full and empty queue */
116 s -= 2;
117 if (s < 0)
118 s = 0;
119 return s;
120}
121EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
122
123/**
124 * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
125 */
126void
127iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
128 struct iwl_rx_queue *q)
129{
130 unsigned long flags;
131 u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
132 u32 reg;
133
134 spin_lock_irqsave(&q->lock, flags);
135
136 if (q->need_update == 0)
137 goto exit_unlock;
138
139 /* If power-saving is in use, make sure device is awake */
140 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
141 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
142
143 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
144 IWL_DEBUG_INFO(priv,
145 "Rx queue requesting wakeup,"
146 " GP1 = 0x%x\n", reg);
147 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
148 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
149 goto exit_unlock;
150 }
151
152 q->write_actual = (q->write & ~0x7);
153 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
154 q->write_actual);
155
156 /* Else device is assumed to be awake */
157 } else {
158 /* Device expects a multiple of 8 */
159 q->write_actual = (q->write & ~0x7);
160 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
161 q->write_actual);
162 }
163
164 q->need_update = 0;
165
166 exit_unlock:
167 spin_unlock_irqrestore(&q->lock, flags);
168}
169EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
170
171int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
172{
173 struct iwl_rx_queue *rxq = &priv->rxq;
174 struct device *dev = &priv->pci_dev->dev;
175 int i;
176
177 spin_lock_init(&rxq->lock);
178 INIT_LIST_HEAD(&rxq->rx_free);
179 INIT_LIST_HEAD(&rxq->rx_used);
180
181 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
182 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
183 GFP_KERNEL);
184 if (!rxq->bd)
185 goto err_bd;
186
187 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
188 &rxq->rb_stts_dma, GFP_KERNEL);
189 if (!rxq->rb_stts)
190 goto err_rb;
191
192 /* Fill the rx_used queue with _all_ of the Rx buffers */
193 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
194 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
195
196 /* Set us so that we have processed and used all buffers, but have
197 * not restocked the Rx queue with fresh buffers */
198 rxq->read = rxq->write = 0;
199 rxq->write_actual = 0;
200 rxq->free_count = 0;
201 rxq->need_update = 0;
202 return 0;
203
204err_rb:
205 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
206 rxq->bd_dma);
207err_bd:
208 return -ENOMEM;
209}
210EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
211
212
213void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
214 struct iwl_rx_mem_buffer *rxb)
215{
216 struct iwl_rx_packet *pkt = rxb_addr(rxb);
217 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
218
219 if (!report->state) {
220 IWL_DEBUG_11H(priv,
221 "Spectrum Measure Notification: Start\n");
222 return;
223 }
224
225 memcpy(&priv->measure_report, report, sizeof(*report));
226 priv->measurement_status |= MEASUREMENT_READY;
227}
228EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
229
230/*
231 * returns non-zero if packet should be dropped
232 */
233int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
234 struct ieee80211_hdr *hdr,
235 u32 decrypt_res,
236 struct ieee80211_rx_status *stats)
237{
238 u16 fc = le16_to_cpu(hdr->frame_control);
239
240 /*
241 * All contexts have the same setting here due to it being
242 * a module parameter, so OK to check any context.
243 */
244 if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
245 RXON_FILTER_DIS_DECRYPT_MSK)
246 return 0;
247
248 if (!(fc & IEEE80211_FCTL_PROTECTED))
249 return 0;
250
251 IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
252 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
253 case RX_RES_STATUS_SEC_TYPE_TKIP:
254 /* The uCode has got a bad phase 1 Key, pushes the packet.
255 * Decryption will be done in SW. */
256 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
257 RX_RES_STATUS_BAD_KEY_TTAK)
258 break;
259
260 case RX_RES_STATUS_SEC_TYPE_WEP:
261 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
262 RX_RES_STATUS_BAD_ICV_MIC) {
263 /* bad ICV, the packet is destroyed since the
264 * decryption is inplace, drop it */
265 IWL_DEBUG_RX(priv, "Packet destroyed\n");
266 return -1;
267 }
268 case RX_RES_STATUS_SEC_TYPE_CCMP:
269 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
270 RX_RES_STATUS_DECRYPT_OK) {
271 IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
272 stats->flag |= RX_FLAG_DECRYPTED;
273 }
274 break;
275
276 default:
277 break;
278 }
279 return 0;
280}
281EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
deleted file mode 100644
index a6b5222fc59e..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-scan.c
+++ /dev/null
@@ -1,549 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32
33#include "iwl-eeprom.h"
34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-sta.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39
40/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
41 * sending probe req. This should be set long enough to hear probe responses
42 * from more than one AP. */
43#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
44#define IWL_ACTIVE_DWELL_TIME_52 (20)
45
46#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
47#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
48
49/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
50 * Must be set longer than active dwell time.
51 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
52#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
53#define IWL_PASSIVE_DWELL_TIME_52 (10)
54#define IWL_PASSIVE_DWELL_BASE (100)
55#define IWL_CHANNEL_TUNE_TIME 5
56
/*
 * iwl_legacy_send_scan_abort - ask the uCode to abort the current HW scan
 *
 * Sends REPLY_SCAN_ABORT_CMD synchronously and checks the reply status.
 * Returns 0 when the firmware accepted the abort, -EIO when the device is
 * not in a state to abort (or the firmware rejected the abort), or the
 * error from the command transport.
 */
static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
{
	int ret;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_ABORT_CMD,
		.flags = CMD_WANT_SKB,	/* we inspect the reply below */
	};

	/* Exit instantly with error when device is not ready
	 * to receive scan abort command or it does not perform
	 * hardware scan currently */
	if (!test_bit(STATUS_READY, &priv->status) ||
	    !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
	    !test_bit(STATUS_SCAN_HW, &priv->status) ||
	    test_bit(STATUS_FW_ERROR, &priv->status) ||
	    test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EIO;

	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (ret)
		return ret;

	pkt = (struct iwl_rx_packet *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	/* Release the reply page allocated because of CMD_WANT_SKB. */
	iwl_legacy_free_pages(priv, cmd.reply_page);
	return ret;
}
95
96static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
97{
98 /* check if scan was requested from mac80211 */
99 if (priv->scan_request) {
100 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
101 ieee80211_scan_completed(priv->hw, aborted);
102 }
103
104 priv->scan_vif = NULL;
105 priv->scan_request = NULL;
106}
107
/*
 * iwl_legacy_force_scan_end - unconditionally finish the current scan
 *
 * Caller must hold priv->mutex.  Clears all scan status bits and reports
 * the scan to mac80211 as aborted, without talking to the firmware; used
 * when the firmware is unresponsive or an abort command could not be sent.
 */
void iwl_legacy_force_scan_end(struct iwl_priv *priv)
{
	lockdep_assert_held(&priv->mutex);

	if (!test_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
		return;
	}

	IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
	clear_bit(STATUS_SCANNING, &priv->status);
	clear_bit(STATUS_SCAN_HW, &priv->status);
	clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	/* true == tell mac80211 the scan was aborted, not completed */
	iwl_legacy_complete_scan(priv, true);
}
123
/*
 * iwl_legacy_do_scan_abort - initiate aborting an in-progress HW scan
 *
 * Caller must hold priv->mutex.  If the abort command cannot be delivered
 * (or the firmware refuses it), the scan is force-completed immediately;
 * otherwise completion arrives later via SCAN_COMPLETE_NOTIFICATION.
 */
static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (!test_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
		return;
	}

	/* test_and_set guarantees only one abort is ever in flight. */
	if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
		return;
	}

	ret = iwl_legacy_send_scan_abort(priv);
	if (ret) {
		IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
		iwl_legacy_force_scan_end(priv);
	} else
		IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
}
147
/**
 * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
 *
 * Only queues the abort work on the driver workqueue; the actual abort
 * runs asynchronously from the abort_scan work handler.  Always returns 0.
 */
int iwl_legacy_scan_cancel(struct iwl_priv *priv)
{
	IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
	queue_work(priv->workqueue, &priv->abort_scan);
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_scan_cancel);
158
/**
 * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
 * @ms: amount of time to wait (in milliseconds) for scan to abort
 *
 * Caller must hold priv->mutex.  Initiates the abort, then polls in 20 ms
 * steps until the hardware scan bit clears or the timeout expires.
 *
 * Returns 0 if the hardware scan stopped within @ms, non-zero (the value
 * of the STATUS_SCAN_HW bit) if it is still running.
 *
 * NOTE(review): sleeps while holding priv->mutex, so large @ms values
 * block all other mutex users for the duration - confirm acceptable.
 */
int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	lockdep_assert_held(&priv->mutex);

	IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");

	iwl_legacy_do_scan_abort(priv);

	while (time_before_eq(jiffies, timeout)) {
		if (!test_bit(STATUS_SCAN_HW, &priv->status))
			break;
		msleep(20);
	}

	return test_bit(STATUS_SCAN_HW, &priv->status);
}
EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
183
/* Service response to REPLY_SCAN_CMD (0x80) */
static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanreq_notification *notif =
	    (struct iwl_scanreq_notification *)pkt->u.raw;

	/* Debug-only: log the firmware's acceptance status of the scan
	 * request; no driver state changes here. */
	IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
}
196
197/* Service SCAN_START_NOTIFICATION (0x82) */
198static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
199 struct iwl_rx_mem_buffer *rxb)
200{
201 struct iwl_rx_packet *pkt = rxb_addr(rxb);
202 struct iwl_scanstart_notification *notif =
203 (struct iwl_scanstart_notification *)pkt->u.raw;
204 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
205 IWL_DEBUG_SCAN(priv, "Scan start: "
206 "%d [802.11%s] "
207 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
208 notif->channel,
209 notif->band ? "bg" : "a",
210 le32_to_cpu(notif->tsf_high),
211 le32_to_cpu(notif->tsf_low),
212 notif->status, notif->beacon_timer);
213}
214
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanresults_notification *notif =
	    (struct iwl_scanresults_notification *)pkt->u.raw;

	/* Debug-only: per-channel scan progress; elapsed time is measured
	 * against the TSF captured in the scan-start notification. */
	IWL_DEBUG_SCAN(priv, "Scan ch.res: "
		       "%d [802.11%s] "
		       "(TSF: 0x%08X:%08X) - %d "
		       "elapsed=%lu usec\n",
		       notif->channel,
		       notif->band ? "bg" : "a",
		       le32_to_cpu(notif->tsf_high),
		       le32_to_cpu(notif->tsf_low),
		       le32_to_cpu(notif->statistics[0]),
		       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
}
236
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
				       struct iwl_rx_mem_buffer *rxb)
{

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	/* NOTE(review): scan_notif is referenced outside its #ifdef; this
	 * presumably builds only because IWL_DEBUG_SCAN discards its
	 * arguments when CONFIG_IWLWIFI_LEGACY_DEBUG is unset - confirm. */
	IWL_DEBUG_SCAN(priv,
			"Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
		       scan_notif->scanned_channels,
		       scan_notif->tsf_low,
		       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(STATUS_SCAN_HW, &priv->status);

	IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
		       (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
		       jiffies_to_msecs(jiffies - priv->scan_start));

	/* SCAN_HW was cleared above, so the completion work will observe
	 * idle hardware when it runs. */
	queue_work(priv->workqueue, &priv->scan_completed);
}
262
263void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
264{
265 /* scan handlers */
266 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
267 priv->rx_handlers[SCAN_START_NOTIFICATION] =
268 iwl_legacy_rx_scan_start_notif;
269 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
270 iwl_legacy_rx_scan_results_notif;
271 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
272 iwl_legacy_rx_scan_complete_notif;
273}
274EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
275
276inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
277 enum ieee80211_band band,
278 u8 n_probes)
279{
280 if (band == IEEE80211_BAND_5GHZ)
281 return IWL_ACTIVE_DWELL_TIME_52 +
282 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
283 else
284 return IWL_ACTIVE_DWELL_TIME_24 +
285 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
286}
287EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
288
/*
 * iwl_legacy_get_passive_dwell_time - per-band passive-scan dwell (msec)
 *
 * Returns the band-specific base dwell, clamped down when associated so
 * scanning does not make us miss beacons.
 *
 * NOTE(review): @vif is unused here; presumably kept for API symmetry
 * with callers - confirm.
 */
u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
			       enum ieee80211_band band,
			       struct ieee80211_vif *vif)
{
	struct iwl_rxon_context *ctx;
	u16 passive = (band == IEEE80211_BAND_2GHZ) ?
	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;

	if (iwl_legacy_is_any_associated(priv)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		for_each_context(priv, ctx) {
			u16 value;

			if (!iwl_legacy_is_associated_ctx(ctx))
				continue;
			/* Unknown (0) or oversized beacon intervals fall
			 * back to the base dwell before applying the 98%
			 * clamp below. */
			value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
			if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
				value = IWL_PASSIVE_DWELL_BASE;
			value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
			passive = min(value, passive);
		}
	}

	return passive;
}
EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
320
321void iwl_legacy_init_scan_params(struct iwl_priv *priv)
322{
323 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
324 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
325 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
326 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
327 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
328}
329EXPORT_SYMBOL(iwl_legacy_init_scan_params);
330
/*
 * iwl_legacy_scan_initiate - start a HW scan on behalf of mac80211
 *
 * Caller must hold priv->mutex.  Validates driver/firmware state, marks
 * STATUS_SCANNING, hands the request to the HW-specific request_scan op
 * and arms the scan watchdog.
 *
 * Returns 0 on success, -EOPNOTSUPP/-EIO/-EBUSY on state errors, or the
 * error returned by the request_scan op.
 */
static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
			     struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (WARN_ON(!priv->cfg->ops->utils->request_scan))
		return -EOPNOTSUPP;

	/* Disarm any watchdog left over from a previous scan; it is
	 * re-armed below once the new scan is accepted. */
	cancel_delayed_work(&priv->scan_check);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_WARN(priv, "Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
		IWL_DEBUG_SCAN(priv,
			"Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
		return -EBUSY;
	}

	IWL_DEBUG_SCAN(priv, "Starting scan...\n");

	set_bit(STATUS_SCANNING, &priv->status);
	priv->scan_start = jiffies;

	ret = priv->cfg->ops->utils->request_scan(priv, vif);
	if (ret) {
		/* Roll back the scanning state on failure. */
		clear_bit(STATUS_SCANNING, &priv->status);
		return ret;
	}

	/* Watchdog: force scan end if firmware never reports completion. */
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IWL_SCAN_CHECK_WATCHDOG);

	return 0;
}
375
/*
 * iwl_legacy_mac_hw_scan - mac80211 hw_scan callback
 *
 * Records the scan request and kicks off the hardware scan.  Returns
 * -EINVAL for an empty channel list, -EAGAIN if a scan is already in
 * progress, or the result of iwl_legacy_scan_initiate().
 *
 * NOTE(review): priv->scan_request stays set when scan_initiate() fails;
 * presumably cleaned up via force_scan_end/complete_scan - confirm.
 */
int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
		    struct ieee80211_vif *vif,
		    struct cfg80211_scan_request *req)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (req->n_channels == 0)
		return -EINVAL;

	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	priv->scan_request = req;
	priv->scan_vif = vif;
	priv->scan_band = req->channels[0]->band;

	ret = iwl_legacy_scan_initiate(priv, vif);

	IWL_DEBUG_MAC80211(priv, "leave\n");

out_unlock:
	mutex_unlock(&priv->mutex);

	return ret;
}
EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
411
412static void iwl_legacy_bg_scan_check(struct work_struct *data)
413{
414 struct iwl_priv *priv =
415 container_of(data, struct iwl_priv, scan_check.work);
416
417 IWL_DEBUG_SCAN(priv, "Scan check work\n");
418
419 /* Since we are here firmware does not finish scan and
420 * most likely is in bad shape, so we don't bother to
421 * send abort command, just force scan complete to mac80211 */
422 mutex_lock(&priv->mutex);
423 iwl_legacy_force_scan_end(priv);
424 mutex_unlock(&priv->mutex);
425}
426
/**
 * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
 * @frame: caller-provided buffer receiving the management frame
 * @ta: transmitter address copied into the SA field
 * @ies: optional extra information elements appended after the SSID IE
 * @ie_len: length of @ies in bytes
 * @left: number of bytes available in @frame
 *
 * Builds a broadcast probe request with a zero-length (wildcard) SSID IE.
 * Returns the number of bytes written, or 0 when @left is too small for
 * the header and SSID IE.
 */

u16
iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
		       const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;	/* fixed header: FC, DA, SA, BSSID, seq_ctrl */
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
	memcpy(frame->sa, ta, ETH_ALEN);
	memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our indirect SSID IE (2 bytes: ID + zero length) */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	len += 2;

	/* Caller's IEs must fit in what remains; otherwise return the
	 * header + SSID IE length only. */
	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16)len;
}
EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
475
476static void iwl_legacy_bg_abort_scan(struct work_struct *work)
477{
478 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
479
480 IWL_DEBUG_SCAN(priv, "Abort scan work\n");
481
482 /* We keep scan_check work queued in case when firmware will not
483 * report back scan completed notification */
484 mutex_lock(&priv->mutex);
485 iwl_legacy_scan_cancel_timeout(priv, 200);
486 mutex_unlock(&priv->mutex);
487}
488
/*
 * iwl_legacy_bg_scan_completed - workqueue handler for scan completion
 *
 * Runs after SCAN_COMPLETE_NOTIFICATION (or a forced/aborted end).
 * Reports completion to mac80211 and, if the radio is still usable,
 * applies power and TX-power settings that were deferred while scanning.
 */
static void iwl_legacy_bg_scan_completed(struct work_struct *work)
{
	struct iwl_priv *priv =
	    container_of(work, struct iwl_priv, scan_completed);
	bool aborted;

	IWL_DEBUG_SCAN(priv, "Completed scan.\n");

	/* The watchdog is no longer needed once completion arrived. */
	cancel_delayed_work(&priv->scan_check);

	mutex_lock(&priv->mutex);

	aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	if (aborted)
		IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");

	/* Someone else (e.g. force_scan_end) may have finished it already;
	 * still fall through to commit the deferred settings. */
	if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
		goto out_settings;
	}

	iwl_legacy_complete_scan(priv, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!iwl_legacy_is_ready_rf(priv))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
	iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);

	priv->cfg->ops->utils->post_scan(priv);

out:
	mutex_unlock(&priv->mutex);
}
529
530void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
531{
532 INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
533 INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
534 INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
535}
536EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
537
/*
 * iwl_legacy_cancel_scan_deferred_work - flush all pending scan work
 *
 * Synchronously cancels the abort and completion work items, then the
 * watchdog.  If the watchdog was still pending, the scan never finished
 * normally, so force-complete it under the mutex.
 */
void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->abort_scan);
	cancel_work_sync(&priv->scan_completed);

	if (cancel_delayed_work_sync(&priv->scan_check)) {
		mutex_lock(&priv->mutex);
		iwl_legacy_force_scan_end(priv);
		mutex_unlock(&priv->mutex);
	}
}
EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
index 9f70a4723103..85fe48e520f9 100644
--- a/drivers/net/wireless/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -26,8 +26,8 @@
26 * 26 *
27 *****************************************************************************/ 27 *****************************************************************************/
28 28
29#ifndef __iwl_legacy_spectrum_h__ 29#ifndef __il_spectrum_h__
30#define __iwl_legacy_spectrum_h__ 30#define __il_spectrum_h__
31enum { /* ieee80211_basic_report.map */ 31enum { /* ieee80211_basic_report.map */
32 IEEE80211_BASIC_MAP_BSS = (1 << 0), 32 IEEE80211_BASIC_MAP_BSS = (1 << 0),
33 IEEE80211_BASIC_MAP_OFDM = (1 << 1), 33 IEEE80211_BASIC_MAP_OFDM = (1 << 1),
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
index 66f0fb2bbe00..75fe315f66b4 100644
--- a/drivers/net/wireless/iwlegacy/iwl-sta.c
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -31,81 +31,82 @@
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/lockdep.h> 33#include <linux/lockdep.h>
34#include <linux/export.h>
34 35
35#include "iwl-dev.h" 36#include "iwl-dev.h"
36#include "iwl-core.h" 37#include "iwl-core.h"
37#include "iwl-sta.h" 38#include "iwl-sta.h"
38 39
39/* priv->sta_lock must be held */ 40/* il->sta_lock must be held */
40static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) 41static void il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
41{ 42{
42 43
43 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) 44 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
44 IWL_ERR(priv, 45 IL_ERR(
45 "ACTIVATE a non DRIVER active station id %u addr %pM\n", 46 "ACTIVATE a non DRIVER active station id %u addr %pM\n",
46 sta_id, priv->stations[sta_id].sta.sta.addr); 47 sta_id, il->stations[sta_id].sta.sta.addr);
47 48
48 if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { 49 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
49 IWL_DEBUG_ASSOC(priv, 50 D_ASSOC(
50 "STA id %u addr %pM already present" 51 "STA id %u addr %pM already present"
51 " in uCode (according to driver)\n", 52 " in uCode (according to driver)\n",
52 sta_id, priv->stations[sta_id].sta.sta.addr); 53 sta_id, il->stations[sta_id].sta.sta.addr);
53 } else { 54 } else {
54 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; 55 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
55 IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", 56 D_ASSOC("Added STA id %u addr %pM to uCode\n",
56 sta_id, priv->stations[sta_id].sta.sta.addr); 57 sta_id, il->stations[sta_id].sta.sta.addr);
57 } 58 }
58} 59}
59 60
60static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv, 61static int il_process_add_sta_resp(struct il_priv *il,
61 struct iwl_legacy_addsta_cmd *addsta, 62 struct il_addsta_cmd *addsta,
62 struct iwl_rx_packet *pkt, 63 struct il_rx_pkt *pkt,
63 bool sync) 64 bool sync)
64{ 65{
65 u8 sta_id = addsta->sta.sta_id; 66 u8 sta_id = addsta->sta.sta_id;
66 unsigned long flags; 67 unsigned long flags;
67 int ret = -EIO; 68 int ret = -EIO;
68 69
69 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 70 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
70 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", 71 IL_ERR("Bad return from C_ADD_STA (0x%08X)\n",
71 pkt->hdr.flags); 72 pkt->hdr.flags);
72 return ret; 73 return ret;
73 } 74 }
74 75
75 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", 76 D_INFO("Processing response for adding station %u\n",
76 sta_id); 77 sta_id);
77 78
78 spin_lock_irqsave(&priv->sta_lock, flags); 79 spin_lock_irqsave(&il->sta_lock, flags);
79 80
80 switch (pkt->u.add_sta.status) { 81 switch (pkt->u.add_sta.status) {
81 case ADD_STA_SUCCESS_MSK: 82 case ADD_STA_SUCCESS_MSK:
82 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); 83 D_INFO("C_ADD_STA PASSED\n");
83 iwl_legacy_sta_ucode_activate(priv, sta_id); 84 il_sta_ucode_activate(il, sta_id);
84 ret = 0; 85 ret = 0;
85 break; 86 break;
86 case ADD_STA_NO_ROOM_IN_TABLE: 87 case ADD_STA_NO_ROOM_IN_TBL:
87 IWL_ERR(priv, "Adding station %d failed, no room in table.\n", 88 IL_ERR("Adding station %d failed, no room in table.\n",
88 sta_id); 89 sta_id);
89 break; 90 break;
90 case ADD_STA_NO_BLOCK_ACK_RESOURCE: 91 case ADD_STA_NO_BLOCK_ACK_RESOURCE:
91 IWL_ERR(priv, 92 IL_ERR(
92 "Adding station %d failed, no block ack resource.\n", 93 "Adding station %d failed, no block ack resource.\n",
93 sta_id); 94 sta_id);
94 break; 95 break;
95 case ADD_STA_MODIFY_NON_EXIST_STA: 96 case ADD_STA_MODIFY_NON_EXIST_STA:
96 IWL_ERR(priv, "Attempting to modify non-existing station %d\n", 97 IL_ERR("Attempting to modify non-existing station %d\n",
97 sta_id); 98 sta_id);
98 break; 99 break;
99 default: 100 default:
100 IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", 101 D_ASSOC("Received C_ADD_STA:(0x%08X)\n",
101 pkt->u.add_sta.status); 102 pkt->u.add_sta.status);
102 break; 103 break;
103 } 104 }
104 105
105 IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", 106 D_INFO("%s station id %u addr %pM\n",
106 priv->stations[sta_id].sta.mode == 107 il->stations[sta_id].sta.mode ==
107 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", 108 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
108 sta_id, priv->stations[sta_id].sta.sta.addr); 109 sta_id, il->stations[sta_id].sta.sta.addr);
109 110
110 /* 111 /*
111 * XXX: The MAC address in the command buffer is often changed from 112 * XXX: The MAC address in the command buffer is often changed from
@@ -115,68 +116,68 @@ static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
115 * issue has not yet been resolved and this debugging is left to 116 * issue has not yet been resolved and this debugging is left to
116 * observe the problem. 117 * observe the problem.
117 */ 118 */
118 IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", 119 D_INFO("%s station according to cmd buffer %pM\n",
119 priv->stations[sta_id].sta.mode == 120 il->stations[sta_id].sta.mode ==
120 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", 121 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
121 addsta->sta.addr); 122 addsta->sta.addr);
122 spin_unlock_irqrestore(&priv->sta_lock, flags); 123 spin_unlock_irqrestore(&il->sta_lock, flags);
123 124
124 return ret; 125 return ret;
125} 126}
126 127
127static void iwl_legacy_add_sta_callback(struct iwl_priv *priv, 128static void il_add_sta_callback(struct il_priv *il,
128 struct iwl_device_cmd *cmd, 129 struct il_device_cmd *cmd,
129 struct iwl_rx_packet *pkt) 130 struct il_rx_pkt *pkt)
130{ 131{
131 struct iwl_legacy_addsta_cmd *addsta = 132 struct il_addsta_cmd *addsta =
132 (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload; 133 (struct il_addsta_cmd *)cmd->cmd.payload;
133 134
134 iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false); 135 il_process_add_sta_resp(il, addsta, pkt, false);
135 136
136} 137}
137 138
138int iwl_legacy_send_add_sta(struct iwl_priv *priv, 139int il_send_add_sta(struct il_priv *il,
139 struct iwl_legacy_addsta_cmd *sta, u8 flags) 140 struct il_addsta_cmd *sta, u8 flags)
140{ 141{
141 struct iwl_rx_packet *pkt = NULL; 142 struct il_rx_pkt *pkt = NULL;
142 int ret = 0; 143 int ret = 0;
143 u8 data[sizeof(*sta)]; 144 u8 data[sizeof(*sta)];
144 struct iwl_host_cmd cmd = { 145 struct il_host_cmd cmd = {
145 .id = REPLY_ADD_STA, 146 .id = C_ADD_STA,
146 .flags = flags, 147 .flags = flags,
147 .data = data, 148 .data = data,
148 }; 149 };
149 u8 sta_id __maybe_unused = sta->sta.sta_id; 150 u8 sta_id __maybe_unused = sta->sta.sta_id;
150 151
151 IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", 152 D_INFO("Adding sta %u (%pM) %ssynchronously\n",
152 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); 153 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
153 154
154 if (flags & CMD_ASYNC) 155 if (flags & CMD_ASYNC)
155 cmd.callback = iwl_legacy_add_sta_callback; 156 cmd.callback = il_add_sta_callback;
156 else { 157 else {
157 cmd.flags |= CMD_WANT_SKB; 158 cmd.flags |= CMD_WANT_SKB;
158 might_sleep(); 159 might_sleep();
159 } 160 }
160 161
161 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); 162 cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
162 ret = iwl_legacy_send_cmd(priv, &cmd); 163 ret = il_send_cmd(il, &cmd);
163 164
164 if (ret || (flags & CMD_ASYNC)) 165 if (ret || (flags & CMD_ASYNC))
165 return ret; 166 return ret;
166 167
167 if (ret == 0) { 168 if (ret == 0) {
168 pkt = (struct iwl_rx_packet *)cmd.reply_page; 169 pkt = (struct il_rx_pkt *)cmd.reply_page;
169 ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true); 170 ret = il_process_add_sta_resp(il, sta, pkt, true);
170 } 171 }
171 iwl_legacy_free_pages(priv, cmd.reply_page); 172 il_free_pages(il, cmd.reply_page);
172 173
173 return ret; 174 return ret;
174} 175}
175EXPORT_SYMBOL(iwl_legacy_send_add_sta); 176EXPORT_SYMBOL(il_send_add_sta);
176 177
177static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index, 178static void il_set_ht_add_station(struct il_priv *il, u8 idx,
178 struct ieee80211_sta *sta, 179 struct ieee80211_sta *sta,
179 struct iwl_rxon_context *ctx) 180 struct il_rxon_context *ctx)
180{ 181{
181 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; 182 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
182 __le32 sta_flags; 183 __le32 sta_flags;
@@ -186,13 +187,13 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
186 goto done; 187 goto done;
187 188
188 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; 189 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
189 IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n", 190 D_ASSOC("spatial multiplexing power save mode: %s\n",
190 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? 191 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
191 "static" : 192 "static" :
192 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 193 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
193 "dynamic" : "disabled"); 194 "dynamic" : "disabled");
194 195
195 sta_flags = priv->stations[index].sta.station_flags; 196 sta_flags = il->stations[idx].sta.station_flags;
196 197
197 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); 198 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
198 199
@@ -206,7 +207,7 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
206 case WLAN_HT_CAP_SM_PS_DISABLED: 207 case WLAN_HT_CAP_SM_PS_DISABLED:
207 break; 208 break;
208 default: 209 default:
209 IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode); 210 IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
210 break; 211 break;
211 } 212 }
212 213
@@ -216,27 +217,27 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
216 sta_flags |= cpu_to_le32( 217 sta_flags |= cpu_to_le32(
217 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); 218 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
218 219
219 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) 220 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
220 sta_flags |= STA_FLG_HT40_EN_MSK; 221 sta_flags |= STA_FLG_HT40_EN_MSK;
221 else 222 else
222 sta_flags &= ~STA_FLG_HT40_EN_MSK; 223 sta_flags &= ~STA_FLG_HT40_EN_MSK;
223 224
224 priv->stations[index].sta.station_flags = sta_flags; 225 il->stations[idx].sta.station_flags = sta_flags;
225 done: 226 done:
226 return; 227 return;
227} 228}
228 229
229/** 230/**
230 * iwl_legacy_prep_station - Prepare station information for addition 231 * il_prep_station - Prepare station information for addition
231 * 232 *
232 * should be called with sta_lock held 233 * should be called with sta_lock held
233 */ 234 */
234u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 235u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
235 const u8 *addr, bool is_ap, struct ieee80211_sta *sta) 236 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
236{ 237{
237 struct iwl_station_entry *station; 238 struct il_station_entry *station;
238 int i; 239 int i;
239 u8 sta_id = IWL_INVALID_STATION; 240 u8 sta_id = IL_INVALID_STATION;
240 u16 rate; 241 u16 rate;
241 242
242 if (is_ap) 243 if (is_ap)
@@ -244,15 +245,15 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
244 else if (is_broadcast_ether_addr(addr)) 245 else if (is_broadcast_ether_addr(addr))
245 sta_id = ctx->bcast_sta_id; 246 sta_id = ctx->bcast_sta_id;
246 else 247 else
247 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) { 248 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
248 if (!compare_ether_addr(priv->stations[i].sta.sta.addr, 249 if (!compare_ether_addr(il->stations[i].sta.sta.addr,
249 addr)) { 250 addr)) {
250 sta_id = i; 251 sta_id = i;
251 break; 252 break;
252 } 253 }
253 254
254 if (!priv->stations[i].used && 255 if (!il->stations[i].used &&
255 sta_id == IWL_INVALID_STATION) 256 sta_id == IL_INVALID_STATION)
256 sta_id = i; 257 sta_id = i;
257 } 258 }
258 259
@@ -260,7 +261,7 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
260 * These two conditions have the same outcome, but keep them 261 * These two conditions have the same outcome, but keep them
261 * separate 262 * separate
262 */ 263 */
263 if (unlikely(sta_id == IWL_INVALID_STATION)) 264 if (unlikely(sta_id == IL_INVALID_STATION))
264 return sta_id; 265 return sta_id;
265 266
266 /* 267 /*
@@ -268,30 +269,30 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
268 * station. Keep track if one is in progress so that we do not send 269 * station. Keep track if one is in progress so that we do not send
269 * another. 270 * another.
270 */ 271 */
271 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { 272 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
272 IWL_DEBUG_INFO(priv, 273 D_INFO(
273 "STA %d already in process of being added.\n", 274 "STA %d already in process of being added.\n",
274 sta_id); 275 sta_id);
275 return sta_id; 276 return sta_id;
276 } 277 }
277 278
278 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && 279 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
279 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) && 280 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
280 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { 281 !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
281 IWL_DEBUG_ASSOC(priv, 282 D_ASSOC(
282 "STA %d (%pM) already added, not adding again.\n", 283 "STA %d (%pM) already added, not adding again.\n",
283 sta_id, addr); 284 sta_id, addr);
284 return sta_id; 285 return sta_id;
285 } 286 }
286 287
287 station = &priv->stations[sta_id]; 288 station = &il->stations[sta_id];
288 station->used = IWL_STA_DRIVER_ACTIVE; 289 station->used = IL_STA_DRIVER_ACTIVE;
289 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", 290 D_ASSOC("Add STA to driver ID %d: %pM\n",
290 sta_id, addr); 291 sta_id, addr);
291 priv->num_stations++; 292 il->num_stations++;
292 293
293 /* Set up the REPLY_ADD_STA command to send to device */ 294 /* Set up the C_ADD_STA command to send to device */
294 memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd)); 295 memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
295 memcpy(station->sta.sta.addr, addr, ETH_ALEN); 296 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
296 station->sta.mode = 0; 297 station->sta.mode = 0;
297 station->sta.sta.sta_id = sta_id; 298 station->sta.sta.sta_id = sta_id;
@@ -299,7 +300,7 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
299 station->ctxid = ctx->ctxid; 300 station->ctxid = ctx->ctxid;
300 301
301 if (sta) { 302 if (sta) {
302 struct iwl_station_priv_common *sta_priv; 303 struct il_station_priv_common *sta_priv;
303 304
304 sta_priv = (void *)sta->drv_priv; 305 sta_priv = (void *)sta->drv_priv;
305 sta_priv->ctx = ctx; 306 sta_priv->ctx = ctx;
@@ -310,42 +311,42 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
310 * STA and broadcast STA) pass in a NULL sta, and mac80211 311 * STA and broadcast STA) pass in a NULL sta, and mac80211
311 * doesn't allow HT IBSS. 312 * doesn't allow HT IBSS.
312 */ 313 */
313 iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx); 314 il_set_ht_add_station(il, sta_id, sta, ctx);
314 315
315 /* 3945 only */ 316 /* 3945 only */
316 rate = (priv->band == IEEE80211_BAND_5GHZ) ? 317 rate = (il->band == IEEE80211_BAND_5GHZ) ?
317 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP; 318 RATE_6M_PLCP : RATE_1M_PLCP;
318 /* Turn on both antennas for the station... */ 319 /* Turn on both antennas for the station... */
319 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); 320 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
320 321
321 return sta_id; 322 return sta_id;
322 323
323} 324}
324EXPORT_SYMBOL_GPL(iwl_legacy_prep_station); 325EXPORT_SYMBOL_GPL(il_prep_station);
325 326
326#define STA_WAIT_TIMEOUT (HZ/2) 327#define STA_WAIT_TIMEOUT (HZ/2)
327 328
328/** 329/**
329 * iwl_legacy_add_station_common - 330 * il_add_station_common -
330 */ 331 */
331int 332int
332iwl_legacy_add_station_common(struct iwl_priv *priv, 333il_add_station_common(struct il_priv *il,
333 struct iwl_rxon_context *ctx, 334 struct il_rxon_context *ctx,
334 const u8 *addr, bool is_ap, 335 const u8 *addr, bool is_ap,
335 struct ieee80211_sta *sta, u8 *sta_id_r) 336 struct ieee80211_sta *sta, u8 *sta_id_r)
336{ 337{
337 unsigned long flags_spin; 338 unsigned long flags_spin;
338 int ret = 0; 339 int ret = 0;
339 u8 sta_id; 340 u8 sta_id;
340 struct iwl_legacy_addsta_cmd sta_cmd; 341 struct il_addsta_cmd sta_cmd;
341 342
342 *sta_id_r = 0; 343 *sta_id_r = 0;
343 spin_lock_irqsave(&priv->sta_lock, flags_spin); 344 spin_lock_irqsave(&il->sta_lock, flags_spin);
344 sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta); 345 sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
345 if (sta_id == IWL_INVALID_STATION) { 346 if (sta_id == IL_INVALID_STATION) {
346 IWL_ERR(priv, "Unable to prepare station %pM for addition\n", 347 IL_ERR("Unable to prepare station %pM for addition\n",
347 addr); 348 addr);
348 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 349 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
349 return -EINVAL; 350 return -EINVAL;
350 } 351 }
351 352
@@ -354,75 +355,75 @@ iwl_legacy_add_station_common(struct iwl_priv *priv,
354 * station. Keep track if one is in progress so that we do not send 355 * station. Keep track if one is in progress so that we do not send
355 * another. 356 * another.
356 */ 357 */
357 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { 358 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
358 IWL_DEBUG_INFO(priv, 359 D_INFO(
359 "STA %d already in process of being added.\n", 360 "STA %d already in process of being added.\n",
360 sta_id); 361 sta_id);
361 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 362 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
362 return -EEXIST; 363 return -EEXIST;
363 } 364 }
364 365
365 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && 366 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
366 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { 367 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
367 IWL_DEBUG_ASSOC(priv, 368 D_ASSOC(
368 "STA %d (%pM) already added, not adding again.\n", 369 "STA %d (%pM) already added, not adding again.\n",
369 sta_id, addr); 370 sta_id, addr);
370 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 371 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
371 return -EEXIST; 372 return -EEXIST;
372 } 373 }
373 374
374 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; 375 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
375 memcpy(&sta_cmd, &priv->stations[sta_id].sta, 376 memcpy(&sta_cmd, &il->stations[sta_id].sta,
376 sizeof(struct iwl_legacy_addsta_cmd)); 377 sizeof(struct il_addsta_cmd));
377 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 378 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
378 379
379 /* Add station to device's station table */ 380 /* Add station to device's station table */
380 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); 381 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
381 if (ret) { 382 if (ret) {
382 spin_lock_irqsave(&priv->sta_lock, flags_spin); 383 spin_lock_irqsave(&il->sta_lock, flags_spin);
383 IWL_ERR(priv, "Adding station %pM failed.\n", 384 IL_ERR("Adding station %pM failed.\n",
384 priv->stations[sta_id].sta.sta.addr); 385 il->stations[sta_id].sta.sta.addr);
385 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; 386 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
386 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; 387 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
387 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 388 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
388 } 389 }
389 *sta_id_r = sta_id; 390 *sta_id_r = sta_id;
390 return ret; 391 return ret;
391} 392}
392EXPORT_SYMBOL(iwl_legacy_add_station_common); 393EXPORT_SYMBOL(il_add_station_common);
393 394
394/** 395/**
395 * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station 396 * il_sta_ucode_deactivate - deactivate ucode status for a station
396 * 397 *
397 * priv->sta_lock must be held 398 * il->sta_lock must be held
398 */ 399 */
399static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id) 400static void il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
400{ 401{
401 /* Ucode must be active and driver must be non active */ 402 /* Ucode must be active and driver must be non active */
402 if ((priv->stations[sta_id].used & 403 if ((il->stations[sta_id].used &
403 (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != 404 (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
404 IWL_STA_UCODE_ACTIVE) 405 IL_STA_UCODE_ACTIVE)
405 IWL_ERR(priv, "removed non active STA %u\n", sta_id); 406 IL_ERR("removed non active STA %u\n", sta_id);
406 407
407 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; 408 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
408 409
409 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); 410 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
410 IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id); 411 D_ASSOC("Removed STA %u\n", sta_id);
411} 412}
412 413
413static int iwl_legacy_send_remove_station(struct iwl_priv *priv, 414static int il_send_remove_station(struct il_priv *il,
414 const u8 *addr, int sta_id, 415 const u8 *addr, int sta_id,
415 bool temporary) 416 bool temporary)
416{ 417{
417 struct iwl_rx_packet *pkt; 418 struct il_rx_pkt *pkt;
418 int ret; 419 int ret;
419 420
420 unsigned long flags_spin; 421 unsigned long flags_spin;
421 struct iwl_rem_sta_cmd rm_sta_cmd; 422 struct il_rem_sta_cmd rm_sta_cmd;
422 423
423 struct iwl_host_cmd cmd = { 424 struct il_host_cmd cmd = {
424 .id = REPLY_REMOVE_STA, 425 .id = C_REM_STA,
425 .len = sizeof(struct iwl_rem_sta_cmd), 426 .len = sizeof(struct il_rem_sta_cmd),
426 .flags = CMD_SYNC, 427 .flags = CMD_SYNC,
427 .data = &rm_sta_cmd, 428 .data = &rm_sta_cmd,
428 }; 429 };
@@ -433,14 +434,14 @@ static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
433 434
434 cmd.flags |= CMD_WANT_SKB; 435 cmd.flags |= CMD_WANT_SKB;
435 436
436 ret = iwl_legacy_send_cmd(priv, &cmd); 437 ret = il_send_cmd(il, &cmd);
437 438
438 if (ret) 439 if (ret)
439 return ret; 440 return ret;
440 441
441 pkt = (struct iwl_rx_packet *)cmd.reply_page; 442 pkt = (struct il_rx_pkt *)cmd.reply_page;
442 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 443 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
443 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 444 IL_ERR("Bad return from C_REM_STA (0x%08X)\n",
444 pkt->hdr.flags); 445 pkt->hdr.flags);
445 ret = -EIO; 446 ret = -EIO;
446 } 447 }
@@ -449,34 +450,34 @@ static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
449 switch (pkt->u.rem_sta.status) { 450 switch (pkt->u.rem_sta.status) {
450 case REM_STA_SUCCESS_MSK: 451 case REM_STA_SUCCESS_MSK:
451 if (!temporary) { 452 if (!temporary) {
452 spin_lock_irqsave(&priv->sta_lock, flags_spin); 453 spin_lock_irqsave(&il->sta_lock, flags_spin);
453 iwl_legacy_sta_ucode_deactivate(priv, sta_id); 454 il_sta_ucode_deactivate(il, sta_id);
454 spin_unlock_irqrestore(&priv->sta_lock, 455 spin_unlock_irqrestore(&il->sta_lock,
455 flags_spin); 456 flags_spin);
456 } 457 }
457 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 458 D_ASSOC("C_REM_STA PASSED\n");
458 break; 459 break;
459 default: 460 default:
460 ret = -EIO; 461 ret = -EIO;
461 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); 462 IL_ERR("C_REM_STA failed\n");
462 break; 463 break;
463 } 464 }
464 } 465 }
465 iwl_legacy_free_pages(priv, cmd.reply_page); 466 il_free_pages(il, cmd.reply_page);
466 467
467 return ret; 468 return ret;
468} 469}
469 470
470/** 471/**
471 * iwl_legacy_remove_station - Remove driver's knowledge of station. 472 * il_remove_station - Remove driver's knowledge of station.
472 */ 473 */
473int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id, 474int il_remove_station(struct il_priv *il, const u8 sta_id,
474 const u8 *addr) 475 const u8 *addr)
475{ 476{
476 unsigned long flags; 477 unsigned long flags;
477 478
478 if (!iwl_legacy_is_ready(priv)) { 479 if (!il_is_ready(il)) {
479 IWL_DEBUG_INFO(priv, 480 D_INFO(
480 "Unable to remove station %pM, device not ready.\n", 481 "Unable to remove station %pM, device not ready.\n",
481 addr); 482 addr);
482 /* 483 /*
@@ -487,85 +488,85 @@ int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
487 return 0; 488 return 0;
488 } 489 }
489 490
490 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", 491 D_ASSOC("Removing STA from driver:%d %pM\n",
491 sta_id, addr); 492 sta_id, addr);
492 493
493 if (WARN_ON(sta_id == IWL_INVALID_STATION)) 494 if (WARN_ON(sta_id == IL_INVALID_STATION))
494 return -EINVAL; 495 return -EINVAL;
495 496
496 spin_lock_irqsave(&priv->sta_lock, flags); 497 spin_lock_irqsave(&il->sta_lock, flags);
497 498
498 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 499 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
499 IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n", 500 D_INFO("Removing %pM but non DRIVER active\n",
500 addr); 501 addr);
501 goto out_err; 502 goto out_err;
502 } 503 }
503 504
504 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { 505 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
505 IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n", 506 D_INFO("Removing %pM but non UCODE active\n",
506 addr); 507 addr);
507 goto out_err; 508 goto out_err;
508 } 509 }
509 510
510 if (priv->stations[sta_id].used & IWL_STA_LOCAL) { 511 if (il->stations[sta_id].used & IL_STA_LOCAL) {
511 kfree(priv->stations[sta_id].lq); 512 kfree(il->stations[sta_id].lq);
512 priv->stations[sta_id].lq = NULL; 513 il->stations[sta_id].lq = NULL;
513 } 514 }
514 515
515 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; 516 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
516 517
517 priv->num_stations--; 518 il->num_stations--;
518 519
519 BUG_ON(priv->num_stations < 0); 520 BUG_ON(il->num_stations < 0);
520 521
521 spin_unlock_irqrestore(&priv->sta_lock, flags); 522 spin_unlock_irqrestore(&il->sta_lock, flags);
522 523
523 return iwl_legacy_send_remove_station(priv, addr, sta_id, false); 524 return il_send_remove_station(il, addr, sta_id, false);
524out_err: 525out_err:
525 spin_unlock_irqrestore(&priv->sta_lock, flags); 526 spin_unlock_irqrestore(&il->sta_lock, flags);
526 return -EINVAL; 527 return -EINVAL;
527} 528}
528EXPORT_SYMBOL_GPL(iwl_legacy_remove_station); 529EXPORT_SYMBOL_GPL(il_remove_station);
529 530
530/** 531/**
531 * iwl_legacy_clear_ucode_stations - clear ucode station table bits 532 * il_clear_ucode_stations - clear ucode station table bits
532 * 533 *
533 * This function clears all the bits in the driver indicating 534 * This function clears all the bits in the driver indicating
534 * which stations are active in the ucode. Call when something 535 * which stations are active in the ucode. Call when something
535 * other than explicit station management would cause this in 536 * other than explicit station management would cause this in
536 * the ucode, e.g. unassociated RXON. 537 * the ucode, e.g. unassociated RXON.
537 */ 538 */
538void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv, 539void il_clear_ucode_stations(struct il_priv *il,
539 struct iwl_rxon_context *ctx) 540 struct il_rxon_context *ctx)
540{ 541{
541 int i; 542 int i;
542 unsigned long flags_spin; 543 unsigned long flags_spin;
543 bool cleared = false; 544 bool cleared = false;
544 545
545 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n"); 546 D_INFO("Clearing ucode stations in driver\n");
546 547
547 spin_lock_irqsave(&priv->sta_lock, flags_spin); 548 spin_lock_irqsave(&il->sta_lock, flags_spin);
548 for (i = 0; i < priv->hw_params.max_stations; i++) { 549 for (i = 0; i < il->hw_params.max_stations; i++) {
549 if (ctx && ctx->ctxid != priv->stations[i].ctxid) 550 if (ctx && ctx->ctxid != il->stations[i].ctxid)
550 continue; 551 continue;
551 552
552 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { 553 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
553 IWL_DEBUG_INFO(priv, 554 D_INFO(
554 "Clearing ucode active for station %d\n", i); 555 "Clearing ucode active for station %d\n", i);
555 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; 556 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
556 cleared = true; 557 cleared = true;
557 } 558 }
558 } 559 }
559 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 560 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
560 561
561 if (!cleared) 562 if (!cleared)
562 IWL_DEBUG_INFO(priv, 563 D_INFO(
563 "No active stations found to be cleared\n"); 564 "No active stations found to be cleared\n");
564} 565}
565EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations); 566EXPORT_SYMBOL(il_clear_ucode_stations);
566 567
567/** 568/**
568 * iwl_legacy_restore_stations() - Restore driver known stations to device 569 * il_restore_stations() - Restore driver known stations to device
569 * 570 *
570 * All stations considered active by driver, but not present in ucode, is 571 * All stations considered active by driver, but not present in ucode, is
571 * restored. 572 * restored.
@@ -573,58 +574,58 @@ EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
573 * Function sleeps. 574 * Function sleeps.
574 */ 575 */
575void 576void
576iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 577il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
577{ 578{
578 struct iwl_legacy_addsta_cmd sta_cmd; 579 struct il_addsta_cmd sta_cmd;
579 struct iwl_link_quality_cmd lq; 580 struct il_link_quality_cmd lq;
580 unsigned long flags_spin; 581 unsigned long flags_spin;
581 int i; 582 int i;
582 bool found = false; 583 bool found = false;
583 int ret; 584 int ret;
584 bool send_lq; 585 bool send_lq;
585 586
586 if (!iwl_legacy_is_ready(priv)) { 587 if (!il_is_ready(il)) {
587 IWL_DEBUG_INFO(priv, 588 D_INFO(
588 "Not ready yet, not restoring any stations.\n"); 589 "Not ready yet, not restoring any stations.\n");
589 return; 590 return;
590 } 591 }
591 592
592 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n"); 593 D_ASSOC("Restoring all known stations ... start.\n");
593 spin_lock_irqsave(&priv->sta_lock, flags_spin); 594 spin_lock_irqsave(&il->sta_lock, flags_spin);
594 for (i = 0; i < priv->hw_params.max_stations; i++) { 595 for (i = 0; i < il->hw_params.max_stations; i++) {
595 if (ctx->ctxid != priv->stations[i].ctxid) 596 if (ctx->ctxid != il->stations[i].ctxid)
596 continue; 597 continue;
597 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) && 598 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
598 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) { 599 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
599 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n", 600 D_ASSOC("Restoring sta %pM\n",
600 priv->stations[i].sta.sta.addr); 601 il->stations[i].sta.sta.addr);
601 priv->stations[i].sta.mode = 0; 602 il->stations[i].sta.mode = 0;
602 priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS; 603 il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
603 found = true; 604 found = true;
604 } 605 }
605 } 606 }
606 607
607 for (i = 0; i < priv->hw_params.max_stations; i++) { 608 for (i = 0; i < il->hw_params.max_stations; i++) {
608 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { 609 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
609 memcpy(&sta_cmd, &priv->stations[i].sta, 610 memcpy(&sta_cmd, &il->stations[i].sta,
610 sizeof(struct iwl_legacy_addsta_cmd)); 611 sizeof(struct il_addsta_cmd));
611 send_lq = false; 612 send_lq = false;
612 if (priv->stations[i].lq) { 613 if (il->stations[i].lq) {
613 memcpy(&lq, priv->stations[i].lq, 614 memcpy(&lq, il->stations[i].lq,
614 sizeof(struct iwl_link_quality_cmd)); 615 sizeof(struct il_link_quality_cmd));
615 send_lq = true; 616 send_lq = true;
616 } 617 }
617 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 618 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
618 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); 619 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
619 if (ret) { 620 if (ret) {
620 spin_lock_irqsave(&priv->sta_lock, flags_spin); 621 spin_lock_irqsave(&il->sta_lock, flags_spin);
621 IWL_ERR(priv, "Adding station %pM failed.\n", 622 IL_ERR("Adding station %pM failed.\n",
622 priv->stations[i].sta.sta.addr); 623 il->stations[i].sta.sta.addr);
623 priv->stations[i].used &= 624 il->stations[i].used &=
624 ~IWL_STA_DRIVER_ACTIVE; 625 ~IL_STA_DRIVER_ACTIVE;
625 priv->stations[i].used &= 626 il->stations[i].used &=
626 ~IWL_STA_UCODE_INPROGRESS; 627 ~IL_STA_UCODE_INPROGRESS;
627 spin_unlock_irqrestore(&priv->sta_lock, 628 spin_unlock_irqrestore(&il->sta_lock,
628 flags_spin); 629 flags_spin);
629 } 630 }
630 /* 631 /*
@@ -632,78 +633,78 @@ iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
632 * current LQ command 633 * current LQ command
633 */ 634 */
634 if (send_lq) 635 if (send_lq)
635 iwl_legacy_send_lq_cmd(priv, ctx, &lq, 636 il_send_lq_cmd(il, ctx, &lq,
636 CMD_SYNC, true); 637 CMD_SYNC, true);
637 spin_lock_irqsave(&priv->sta_lock, flags_spin); 638 spin_lock_irqsave(&il->sta_lock, flags_spin);
638 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; 639 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
639 } 640 }
640 } 641 }
641 642
642 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 643 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
643 if (!found) 644 if (!found)
644 IWL_DEBUG_INFO(priv, "Restoring all known stations" 645 D_INFO("Restoring all known stations"
645 " .... no stations to be restored.\n"); 646 " .... no stations to be restored.\n");
646 else 647 else
647 IWL_DEBUG_INFO(priv, "Restoring all known stations" 648 D_INFO("Restoring all known stations"
648 " .... complete.\n"); 649 " .... complete.\n");
649} 650}
650EXPORT_SYMBOL(iwl_legacy_restore_stations); 651EXPORT_SYMBOL(il_restore_stations);
651 652
652int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv) 653int il_get_free_ucode_key_idx(struct il_priv *il)
653{ 654{
654 int i; 655 int i;
655 656
656 for (i = 0; i < priv->sta_key_max_num; i++) 657 for (i = 0; i < il->sta_key_max_num; i++)
657 if (!test_and_set_bit(i, &priv->ucode_key_table)) 658 if (!test_and_set_bit(i, &il->ucode_key_table))
658 return i; 659 return i;
659 660
660 return WEP_INVALID_OFFSET; 661 return WEP_INVALID_OFFSET;
661} 662}
662EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index); 663EXPORT_SYMBOL(il_get_free_ucode_key_idx);
663 664
664void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv) 665void il_dealloc_bcast_stations(struct il_priv *il)
665{ 666{
666 unsigned long flags; 667 unsigned long flags;
667 int i; 668 int i;
668 669
669 spin_lock_irqsave(&priv->sta_lock, flags); 670 spin_lock_irqsave(&il->sta_lock, flags);
670 for (i = 0; i < priv->hw_params.max_stations; i++) { 671 for (i = 0; i < il->hw_params.max_stations; i++) {
671 if (!(priv->stations[i].used & IWL_STA_BCAST)) 672 if (!(il->stations[i].used & IL_STA_BCAST))
672 continue; 673 continue;
673 674
674 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; 675 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
675 priv->num_stations--; 676 il->num_stations--;
676 BUG_ON(priv->num_stations < 0); 677 BUG_ON(il->num_stations < 0);
677 kfree(priv->stations[i].lq); 678 kfree(il->stations[i].lq);
678 priv->stations[i].lq = NULL; 679 il->stations[i].lq = NULL;
679 } 680 }
680 spin_unlock_irqrestore(&priv->sta_lock, flags); 681 spin_unlock_irqrestore(&il->sta_lock, flags);
681} 682}
682EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations); 683EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
683 684
684#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG 685#ifdef CONFIG_IWLEGACY_DEBUG
685static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, 686static void il_dump_lq_cmd(struct il_priv *il,
686 struct iwl_link_quality_cmd *lq) 687 struct il_link_quality_cmd *lq)
687{ 688{
688 int i; 689 int i;
689 IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id); 690 D_RATE("lq station id 0x%x\n", lq->sta_id);
690 IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n", 691 D_RATE("lq ant 0x%X 0x%X\n",
691 lq->general_params.single_stream_ant_msk, 692 lq->general_params.single_stream_ant_msk,
692 lq->general_params.dual_stream_ant_msk); 693 lq->general_params.dual_stream_ant_msk);
693 694
694 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 695 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
695 IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n", 696 D_RATE("lq idx %d 0x%X\n",
696 i, lq->rs_table[i].rate_n_flags); 697 i, lq->rs_table[i].rate_n_flags);
697} 698}
698#else 699#else
699static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, 700static inline void il_dump_lq_cmd(struct il_priv *il,
700 struct iwl_link_quality_cmd *lq) 701 struct il_link_quality_cmd *lq)
701{ 702{
702} 703}
703#endif 704#endif
704 705
705/** 706/**
706 * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity 707 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
707 * 708 *
708 * It sometimes happens when a HT rate has been in use and we 709 * It sometimes happens when a HT rate has been in use and we
709 * loose connectivity with AP then mac80211 will first tell us that the 710 * loose connectivity with AP then mac80211 will first tell us that the
@@ -713,22 +714,22 @@ static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
713 * Test for this to prevent driver from sending LQ command between the time 714 * Test for this to prevent driver from sending LQ command between the time
714 * RXON flags are updated and when LQ command is updated. 715 * RXON flags are updated and when LQ command is updated.
715 */ 716 */
716static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv, 717static bool il_is_lq_table_valid(struct il_priv *il,
717 struct iwl_rxon_context *ctx, 718 struct il_rxon_context *ctx,
718 struct iwl_link_quality_cmd *lq) 719 struct il_link_quality_cmd *lq)
719{ 720{
720 int i; 721 int i;
721 722
722 if (ctx->ht.enabled) 723 if (ctx->ht.enabled)
723 return true; 724 return true;
724 725
725 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n", 726 D_INFO("Channel %u is not an HT channel\n",
726 ctx->active.channel); 727 ctx->active.channel);
727 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 728 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
728 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & 729 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
729 RATE_MCS_HT_MSK) { 730 RATE_MCS_HT_MSK) {
730 IWL_DEBUG_INFO(priv, 731 D_INFO(
731 "index %d of LQ expects HT channel\n", 732 "idx %d of LQ expects HT channel\n",
732 i); 733 i);
733 return false; 734 return false;
734 } 735 }
@@ -737,7 +738,7 @@ static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
737} 738}
738 739
739/** 740/**
740 * iwl_legacy_send_lq_cmd() - Send link quality command 741 * il_send_lq_cmd() - Send link quality command
741 * @init: This command is sent as part of station initialization right 742 * @init: This command is sent as part of station initialization right
742 * after station has been added. 743 * after station has been added.
743 * 744 *
@@ -746,35 +747,35 @@ static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
746 * this case to clear the state indicating that station creation is in 747 * this case to clear the state indicating that station creation is in
747 * progress. 748 * progress.
748 */ 749 */
749int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 750int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
750 struct iwl_link_quality_cmd *lq, u8 flags, bool init) 751 struct il_link_quality_cmd *lq, u8 flags, bool init)
751{ 752{
752 int ret = 0; 753 int ret = 0;
753 unsigned long flags_spin; 754 unsigned long flags_spin;
754 755
755 struct iwl_host_cmd cmd = { 756 struct il_host_cmd cmd = {
756 .id = REPLY_TX_LINK_QUALITY_CMD, 757 .id = C_TX_LINK_QUALITY_CMD,
757 .len = sizeof(struct iwl_link_quality_cmd), 758 .len = sizeof(struct il_link_quality_cmd),
758 .flags = flags, 759 .flags = flags,
759 .data = lq, 760 .data = lq,
760 }; 761 };
761 762
762 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) 763 if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
763 return -EINVAL; 764 return -EINVAL;
764 765
765 766
766 spin_lock_irqsave(&priv->sta_lock, flags_spin); 767 spin_lock_irqsave(&il->sta_lock, flags_spin);
767 if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 768 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
768 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 769 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
769 return -EINVAL; 770 return -EINVAL;
770 } 771 }
771 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 772 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
772 773
773 iwl_legacy_dump_lq_cmd(priv, lq); 774 il_dump_lq_cmd(il, lq);
774 BUG_ON(init && (cmd.flags & CMD_ASYNC)); 775 BUG_ON(init && (cmd.flags & CMD_ASYNC));
775 776
776 if (iwl_legacy_is_lq_table_valid(priv, ctx, lq)) 777 if (il_is_lq_table_valid(il, ctx, lq))
777 ret = iwl_legacy_send_cmd(priv, &cmd); 778 ret = il_send_cmd(il, &cmd);
778 else 779 else
779 ret = -EINVAL; 780 ret = -EINVAL;
780 781
@@ -782,35 +783,35 @@ int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
782 return ret; 783 return ret;
783 784
784 if (init) { 785 if (init) {
785 IWL_DEBUG_INFO(priv, "init LQ command complete," 786 D_INFO("init LQ command complete,"
786 " clearing sta addition status for sta %d\n", 787 " clearing sta addition status for sta %d\n",
787 lq->sta_id); 788 lq->sta_id);
788 spin_lock_irqsave(&priv->sta_lock, flags_spin); 789 spin_lock_irqsave(&il->sta_lock, flags_spin);
789 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; 790 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
790 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 791 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
791 } 792 }
792 return ret; 793 return ret;
793} 794}
794EXPORT_SYMBOL(iwl_legacy_send_lq_cmd); 795EXPORT_SYMBOL(il_send_lq_cmd);
795 796
796int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw, 797int il_mac_sta_remove(struct ieee80211_hw *hw,
797 struct ieee80211_vif *vif, 798 struct ieee80211_vif *vif,
798 struct ieee80211_sta *sta) 799 struct ieee80211_sta *sta)
799{ 800{
800 struct iwl_priv *priv = hw->priv; 801 struct il_priv *il = hw->priv;
801 struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv; 802 struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
802 int ret; 803 int ret;
803 804
804 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n", 805 D_INFO("received request to remove station %pM\n",
805 sta->addr); 806 sta->addr);
806 mutex_lock(&priv->mutex); 807 mutex_lock(&il->mutex);
807 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", 808 D_INFO("proceeding to remove station %pM\n",
808 sta->addr); 809 sta->addr);
809 ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr); 810 ret = il_remove_station(il, sta_common->sta_id, sta->addr);
810 if (ret) 811 if (ret)
811 IWL_ERR(priv, "Error removing station %pM\n", 812 IL_ERR("Error removing station %pM\n",
812 sta->addr); 813 sta->addr);
813 mutex_unlock(&priv->mutex); 814 mutex_unlock(&il->mutex);
814 return ret; 815 return ret;
815} 816}
816EXPORT_SYMBOL(iwl_legacy_mac_sta_remove); 817EXPORT_SYMBOL(il_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
deleted file mode 100644
index 67bd75fe01a1..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.h
+++ /dev/null
@@ -1,148 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_legacy_sta_h__
30#define __iwl_legacy_sta_h__
31
32#include "iwl-dev.h"
33
34#define HW_KEY_DYNAMIC 0
35#define HW_KEY_DEFAULT 1
36
37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
39#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
40 being activated */
41#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
42 (this is for the IBSS BSSID stations) */
43#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
44
45
46void iwl_legacy_restore_stations(struct iwl_priv *priv,
47 struct iwl_rxon_context *ctx);
48void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
49 struct iwl_rxon_context *ctx);
50void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
51int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
52int iwl_legacy_send_add_sta(struct iwl_priv *priv,
53 struct iwl_legacy_addsta_cmd *sta, u8 flags);
54int iwl_legacy_add_station_common(struct iwl_priv *priv,
55 struct iwl_rxon_context *ctx,
56 const u8 *addr, bool is_ap,
57 struct ieee80211_sta *sta, u8 *sta_id_r);
58int iwl_legacy_remove_station(struct iwl_priv *priv,
59 const u8 sta_id,
60 const u8 *addr);
61int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
62 struct ieee80211_vif *vif,
63 struct ieee80211_sta *sta);
64
65u8 iwl_legacy_prep_station(struct iwl_priv *priv,
66 struct iwl_rxon_context *ctx,
67 const u8 *addr, bool is_ap,
68 struct ieee80211_sta *sta);
69
70int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
71 struct iwl_rxon_context *ctx,
72 struct iwl_link_quality_cmd *lq,
73 u8 flags, bool init);
74
75/**
76 * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
77 * @priv: iwl priv struct
78 *
79 * This is called during iwl_down() to make sure that in the case
80 * we're coming there from a hardware restart mac80211 will be
81 * able to reconfigure stations -- if we're getting there in the
82 * normal down flow then the stations will already be cleared.
83 */
84static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
85{
86 unsigned long flags;
87 struct iwl_rxon_context *ctx;
88
89 spin_lock_irqsave(&priv->sta_lock, flags);
90 memset(priv->stations, 0, sizeof(priv->stations));
91 priv->num_stations = 0;
92
93 priv->ucode_key_table = 0;
94
95 for_each_context(priv, ctx) {
96 /*
97 * Remove all key information that is not stored as part
98 * of station information since mac80211 may not have had
99 * a chance to remove all the keys. When device is
100 * reconfigured by mac80211 after an error all keys will
101 * be reconfigured.
102 */
103 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
104 ctx->key_mapping_keys = 0;
105 }
106
107 spin_unlock_irqrestore(&priv->sta_lock, flags);
108}
109
110static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
111{
112 if (WARN_ON(!sta))
113 return IWL_INVALID_STATION;
114
115 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
116}
117
118/**
119 * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
120 * @priv: iwl priv
121 * @context: the current context
122 * @sta: mac80211 station
123 *
124 * In certain circumstances mac80211 passes a station pointer
125 * that may be %NULL, for example during TX or key setup. In
126 * that case, we need to use the broadcast station, so this
127 * inline wraps that pattern.
128 */
129static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
130 struct iwl_rxon_context *context,
131 struct ieee80211_sta *sta)
132{
133 int sta_id;
134
135 if (!sta)
136 return context->bcast_sta_id;
137
138 sta_id = iwl_legacy_sta_id(sta);
139
140 /*
141 * mac80211 should not be passing a partially
142 * initialised station!
143 */
144 WARN_ON(sta_id == IWL_INVALID_STATION);
145
146 return sta_id;
147}
148#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
deleted file mode 100644
index ef9e268bf8a0..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ /dev/null
@@ -1,658 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <net/mac80211.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40
41/**
42 * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
43 */
44void
45iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
46{
47 u32 reg = 0;
48 int txq_id = txq->q.id;
49
50 if (txq->need_update == 0)
51 return;
52
53 /* if we're trying to save power */
54 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
55 /* wake up nic if it's powered down ...
56 * uCode will wake up, and interrupt us again, so next
57 * time we'll skip this part. */
58 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
59
60 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
61 IWL_DEBUG_INFO(priv,
62 "Tx queue %d requesting wakeup,"
63 " GP1 = 0x%x\n", txq_id, reg);
64 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
65 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
66 return;
67 }
68
69 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
70 txq->q.write_ptr | (txq_id << 8));
71
72 /*
73 * else not in power-save mode,
74 * uCode will never sleep when we're
75 * trying to tx (during RFKILL, we're not trying to tx).
76 */
77 } else
78 iwl_write32(priv, HBUS_TARG_WRPTR,
79 txq->q.write_ptr | (txq_id << 8));
80 txq->need_update = 0;
81}
82EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
83
84/**
85 * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
86 */
87void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
88{
89 struct iwl_tx_queue *txq = &priv->txq[txq_id];
90 struct iwl_queue *q = &txq->q;
91
92 if (q->n_bd == 0)
93 return;
94
95 while (q->write_ptr != q->read_ptr) {
96 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
97 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
98 }
99}
100EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
101
102/**
103 * iwl_legacy_tx_queue_free - Deallocate DMA queue.
104 * @txq: Transmit queue to deallocate.
105 *
106 * Empty queue by removing and destroying all BD's.
107 * Free all buffers.
108 * 0-fill, but do not free "txq" descriptor structure.
109 */
110void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
111{
112 struct iwl_tx_queue *txq = &priv->txq[txq_id];
113 struct device *dev = &priv->pci_dev->dev;
114 int i;
115
116 iwl_legacy_tx_queue_unmap(priv, txq_id);
117
118 /* De-alloc array of command/tx buffers */
119 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
120 kfree(txq->cmd[i]);
121
122 /* De-alloc circular buffer of TFDs */
123 if (txq->q.n_bd)
124 dma_free_coherent(dev, priv->hw_params.tfd_size *
125 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
126
127 /* De-alloc array of per-TFD driver data */
128 kfree(txq->txb);
129 txq->txb = NULL;
130
131 /* deallocate arrays */
132 kfree(txq->cmd);
133 kfree(txq->meta);
134 txq->cmd = NULL;
135 txq->meta = NULL;
136
137 /* 0-fill queue descriptor structure */
138 memset(txq, 0, sizeof(*txq));
139}
140EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
141
142/**
143 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
144 */
145void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
146{
147 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
148 struct iwl_queue *q = &txq->q;
149 int i;
150
151 if (q->n_bd == 0)
152 return;
153
154 while (q->read_ptr != q->write_ptr) {
155 i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
156
157 if (txq->meta[i].flags & CMD_MAPPED) {
158 pci_unmap_single(priv->pci_dev,
159 dma_unmap_addr(&txq->meta[i], mapping),
160 dma_unmap_len(&txq->meta[i], len),
161 PCI_DMA_BIDIRECTIONAL);
162 txq->meta[i].flags = 0;
163 }
164
165 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
166 }
167
168 i = q->n_window;
169 if (txq->meta[i].flags & CMD_MAPPED) {
170 pci_unmap_single(priv->pci_dev,
171 dma_unmap_addr(&txq->meta[i], mapping),
172 dma_unmap_len(&txq->meta[i], len),
173 PCI_DMA_BIDIRECTIONAL);
174 txq->meta[i].flags = 0;
175 }
176}
177EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
178
179/**
180 * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
181 * @txq: Transmit queue to deallocate.
182 *
183 * Empty queue by removing and destroying all BD's.
184 * Free all buffers.
185 * 0-fill, but do not free "txq" descriptor structure.
186 */
187void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
188{
189 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
190 struct device *dev = &priv->pci_dev->dev;
191 int i;
192
193 iwl_legacy_cmd_queue_unmap(priv);
194
195 /* De-alloc array of command/tx buffers */
196 for (i = 0; i <= TFD_CMD_SLOTS; i++)
197 kfree(txq->cmd[i]);
198
199 /* De-alloc circular buffer of TFDs */
200 if (txq->q.n_bd)
201 dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
202 txq->tfds, txq->q.dma_addr);
203
204 /* deallocate arrays */
205 kfree(txq->cmd);
206 kfree(txq->meta);
207 txq->cmd = NULL;
208 txq->meta = NULL;
209
210 /* 0-fill queue descriptor structure */
211 memset(txq, 0, sizeof(*txq));
212}
213EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
214
215/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
216 * DMA services
217 *
218 * Theory of operation
219 *
220 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
221 * of buffer descriptors, each of which points to one or more data buffers for
222 * the device to read from or fill. Driver and device exchange status of each
223 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
224 * entries in each circular buffer, to protect against confusing empty and full
225 * queue states.
226 *
227 * The device reads or writes the data in the queues via the device's several
228 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
229 *
230 * For Tx queue, there are low mark and high mark limits. If, after queuing
231 * the packet for Tx, free space become < low mark, Tx queue stopped. When
232 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
233 * Tx queue resumed.
234 *
235 * See more detailed info in iwl-4965-hw.h.
236 ***************************************************/
237
238int iwl_legacy_queue_space(const struct iwl_queue *q)
239{
240 int s = q->read_ptr - q->write_ptr;
241
242 if (q->read_ptr > q->write_ptr)
243 s -= q->n_bd;
244
245 if (s <= 0)
246 s += q->n_window;
247 /* keep some reserve to not confuse empty and full situations */
248 s -= 2;
249 if (s < 0)
250 s = 0;
251 return s;
252}
253EXPORT_SYMBOL(iwl_legacy_queue_space);
254
255
256/**
257 * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
258 */
259static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
260 int count, int slots_num, u32 id)
261{
262 q->n_bd = count;
263 q->n_window = slots_num;
264 q->id = id;
265
266 /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
267 * and iwl_legacy_queue_dec_wrap are broken. */
268 BUG_ON(!is_power_of_2(count));
269
270 /* slots_num must be power-of-two size, otherwise
271 * iwl_legacy_get_cmd_index is broken. */
272 BUG_ON(!is_power_of_2(slots_num));
273
274 q->low_mark = q->n_window / 4;
275 if (q->low_mark < 4)
276 q->low_mark = 4;
277
278 q->high_mark = q->n_window / 8;
279 if (q->high_mark < 2)
280 q->high_mark = 2;
281
282 q->write_ptr = q->read_ptr = 0;
283
284 return 0;
285}
286
287/**
288 * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
289 */
290static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
291 struct iwl_tx_queue *txq, u32 id)
292{
293 struct device *dev = &priv->pci_dev->dev;
294 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
295
296 /* Driver private data, only for Tx (not command) queues,
297 * not shared with device. */
298 if (id != priv->cmd_queue) {
299 txq->txb = kzalloc(sizeof(txq->txb[0]) *
300 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
301 if (!txq->txb) {
302 IWL_ERR(priv, "kmalloc for auxiliary BD "
303 "structures failed\n");
304 goto error;
305 }
306 } else {
307 txq->txb = NULL;
308 }
309
310 /* Circular buffer of transmit frame descriptors (TFDs),
311 * shared with device */
312 txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
313 GFP_KERNEL);
314 if (!txq->tfds) {
315 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
316 goto error;
317 }
318 txq->q.id = id;
319
320 return 0;
321
322 error:
323 kfree(txq->txb);
324 txq->txb = NULL;
325
326 return -ENOMEM;
327}
328
329/**
330 * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
331 */
332int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
333 int slots_num, u32 txq_id)
334{
335 int i, len;
336 int ret;
337 int actual_slots = slots_num;
338
339 /*
340 * Alloc buffer array for commands (Tx or other types of commands).
341 * For the command queue (#4/#9), allocate command space + one big
342 * command for scan, since scan command is very huge; the system will
343 * not have two scans at the same time, so only one is needed.
344 * For normal Tx queues (all other queues), no super-size command
345 * space is needed.
346 */
347 if (txq_id == priv->cmd_queue)
348 actual_slots++;
349
350 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
351 GFP_KERNEL);
352 txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
353 GFP_KERNEL);
354
355 if (!txq->meta || !txq->cmd)
356 goto out_free_arrays;
357
358 len = sizeof(struct iwl_device_cmd);
359 for (i = 0; i < actual_slots; i++) {
360 /* only happens for cmd queue */
361 if (i == slots_num)
362 len = IWL_MAX_CMD_SIZE;
363
364 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
365 if (!txq->cmd[i])
366 goto err;
367 }
368
369 /* Alloc driver data array and TFD circular buffer */
370 ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
371 if (ret)
372 goto err;
373
374 txq->need_update = 0;
375
376 /*
377 * For the default queues 0-3, set up the swq_id
378 * already -- all others need to get one later
379 * (if they need one at all).
380 */
381 if (txq_id < 4)
382 iwl_legacy_set_swq_id(txq, txq_id, txq_id);
383
384 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
385 * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
386 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
387
388 /* Initialize queue's high/low-water marks, and head/tail indexes */
389 iwl_legacy_queue_init(priv, &txq->q,
390 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
391
392 /* Tell device where to find queue */
393 priv->cfg->ops->lib->txq_init(priv, txq);
394
395 return 0;
396err:
397 for (i = 0; i < actual_slots; i++)
398 kfree(txq->cmd[i]);
399out_free_arrays:
400 kfree(txq->meta);
401 kfree(txq->cmd);
402
403 return -ENOMEM;
404}
405EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
406
407void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
408 int slots_num, u32 txq_id)
409{
410 int actual_slots = slots_num;
411
412 if (txq_id == priv->cmd_queue)
413 actual_slots++;
414
415 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
416
417 txq->need_update = 0;
418
419 /* Initialize queue's high/low-water marks, and head/tail indexes */
420 iwl_legacy_queue_init(priv, &txq->q,
421 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
422
423 /* Tell device where to find queue */
424 priv->cfg->ops->lib->txq_init(priv, txq);
425}
426EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
427
428/*************** HOST COMMAND QUEUE FUNCTIONS *****/
429
430/**
431 * iwl_legacy_enqueue_hcmd - enqueue a uCode command
432 * @priv: device private data point
433 * @cmd: a point to the ucode command structure
434 *
435 * The function returns < 0 values to indicate the operation is
436 * failed. On success, it turns the index (> 0) of command in the
437 * command queue.
438 */
439int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
440{
441 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
442 struct iwl_queue *q = &txq->q;
443 struct iwl_device_cmd *out_cmd;
444 struct iwl_cmd_meta *out_meta;
445 dma_addr_t phys_addr;
446 unsigned long flags;
447 int len;
448 u32 idx;
449 u16 fix_size;
450
451 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
452 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
453
454 /* If any of the command structures end up being larger than
455 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
456 * we will need to increase the size of the TFD entries
457 * Also, check to see if command buffer should not exceed the size
458 * of device_cmd and max_cmd_size. */
459 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
460 !(cmd->flags & CMD_SIZE_HUGE));
461 BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
462
463 if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
464 IWL_WARN(priv, "Not sending command - %s KILL\n",
465 iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
466 return -EIO;
467 }
468
469 spin_lock_irqsave(&priv->hcmd_lock, flags);
470
471 if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
472 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
473
474 IWL_ERR(priv, "Restarting adapter due to command queue full\n");
475 queue_work(priv->workqueue, &priv->restart);
476 return -ENOSPC;
477 }
478
479 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
480 out_cmd = txq->cmd[idx];
481 out_meta = &txq->meta[idx];
482
483 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
484 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
485 return -ENOSPC;
486 }
487
488 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
489 out_meta->flags = cmd->flags | CMD_MAPPED;
490 if (cmd->flags & CMD_WANT_SKB)
491 out_meta->source = cmd;
492 if (cmd->flags & CMD_ASYNC)
493 out_meta->callback = cmd->callback;
494
495 out_cmd->hdr.cmd = cmd->id;
496 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
497
498 /* At this point, the out_cmd now has all of the incoming cmd
499 * information */
500
501 out_cmd->hdr.flags = 0;
502 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
503 INDEX_TO_SEQ(q->write_ptr));
504 if (cmd->flags & CMD_SIZE_HUGE)
505 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
506 len = sizeof(struct iwl_device_cmd);
507 if (idx == TFD_CMD_SLOTS)
508 len = IWL_MAX_CMD_SIZE;
509
510#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
511 switch (out_cmd->hdr.cmd) {
512 case REPLY_TX_LINK_QUALITY_CMD:
513 case SENSITIVITY_CMD:
514 IWL_DEBUG_HC_DUMP(priv,
515 "Sending command %s (#%x), seq: 0x%04X, "
516 "%d bytes at %d[%d]:%d\n",
517 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
518 out_cmd->hdr.cmd,
519 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
520 q->write_ptr, idx, priv->cmd_queue);
521 break;
522 default:
523 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
524 "%d bytes at %d[%d]:%d\n",
525 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
526 out_cmd->hdr.cmd,
527 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
528 q->write_ptr, idx, priv->cmd_queue);
529 }
530#endif
531 txq->need_update = 1;
532
533 if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
534 /* Set up entry in queue's byte count circular buffer */
535 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
536
537 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
538 fix_size, PCI_DMA_BIDIRECTIONAL);
539 dma_unmap_addr_set(out_meta, mapping, phys_addr);
540 dma_unmap_len_set(out_meta, len, fix_size);
541
542 trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
543 fix_size, cmd->flags);
544
545 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
546 phys_addr, fix_size, 1,
547 U32_PAD(cmd->len));
548
549 /* Increment and update queue's write index */
550 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
551 iwl_legacy_txq_update_write_ptr(priv, txq);
552
553 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
554 return idx;
555}
556
557/**
558 * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
559 *
560 * When FW advances 'R' index, all entries between old and new 'R' index
561 * need to be reclaimed. As result, some free space forms. If there is
562 * enough free space (> low mark), wake the stack that feeds us.
563 */
564static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
565 int idx, int cmd_idx)
566{
567 struct iwl_tx_queue *txq = &priv->txq[txq_id];
568 struct iwl_queue *q = &txq->q;
569 int nfreed = 0;
570
571 if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
572 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
573 "is out of range [0-%d] %d %d.\n", txq_id,
574 idx, q->n_bd, q->write_ptr, q->read_ptr);
575 return;
576 }
577
578 for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
579 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
580
581 if (nfreed++ > 0) {
582 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
583 q->write_ptr, q->read_ptr);
584 queue_work(priv->workqueue, &priv->restart);
585 }
586
587 }
588}
589
590/**
591 * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
592 * @rxb: Rx buffer to reclaim
593 *
594 * If an Rx buffer has an async callback associated with it the callback
595 * will be executed. The attached skb (if present) will only be freed
596 * if the callback returns 1
597 */
598void
599iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
600{
601 struct iwl_rx_packet *pkt = rxb_addr(rxb);
602 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
603 int txq_id = SEQ_TO_QUEUE(sequence);
604 int index = SEQ_TO_INDEX(sequence);
605 int cmd_index;
606 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
607 struct iwl_device_cmd *cmd;
608 struct iwl_cmd_meta *meta;
609 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
610 unsigned long flags;
611
612 /* If a Tx command is being handled and it isn't in the actual
613 * command queue then there a command routing bug has been introduced
614 * in the queue management code. */
615 if (WARN(txq_id != priv->cmd_queue,
616 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
617 txq_id, priv->cmd_queue, sequence,
618 priv->txq[priv->cmd_queue].q.read_ptr,
619 priv->txq[priv->cmd_queue].q.write_ptr)) {
620 iwl_print_hex_error(priv, pkt, 32);
621 return;
622 }
623
624 cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
625 cmd = txq->cmd[cmd_index];
626 meta = &txq->meta[cmd_index];
627
628 txq->time_stamp = jiffies;
629
630 pci_unmap_single(priv->pci_dev,
631 dma_unmap_addr(meta, mapping),
632 dma_unmap_len(meta, len),
633 PCI_DMA_BIDIRECTIONAL);
634
635 /* Input error checking is done when commands are added to queue. */
636 if (meta->flags & CMD_WANT_SKB) {
637 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
638 rxb->page = NULL;
639 } else if (meta->callback)
640 meta->callback(priv, cmd, pkt);
641
642 spin_lock_irqsave(&priv->hcmd_lock, flags);
643
644 iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
645
646 if (!(meta->flags & CMD_ASYNC)) {
647 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
648 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
649 iwl_legacy_get_cmd_string(cmd->hdr.cmd));
650 wake_up(&priv->wait_command_queue);
651 }
652
653 /* Mark as unmapped */
654 meta->flags = 0;
655
656 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
657}
658EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
deleted file mode 100644
index b282d869a546..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ /dev/null
@@ -1,4016 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/ieee80211_radiotap.h>
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl3945"
53
54#include "iwl-fh.h"
55#include "iwl-3945-fh.h"
56#include "iwl-commands.h"
57#include "iwl-sta.h"
58#include "iwl-3945.h"
59#include "iwl-core.h"
60#include "iwl-helpers.h"
61#include "iwl-dev.h"
62#include "iwl-spectrum.h"
63
64/*
65 * module name, copyright, version, etc.
66 */
67
68#define DRV_DESCRIPTION \
69"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
70
71#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
72#define VD "d"
73#else
74#define VD
75#endif
76
77/*
78 * add "s" to indicate spectrum measurement included.
79 * we add it here to be consistent with previous releases in which
80 * this was configurable.
81 */
82#define DRV_VERSION IWLWIFI_VERSION VD "s"
83#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
84#define DRV_AUTHOR "<ilw@linux.intel.com>"
85
86MODULE_DESCRIPTION(DRV_DESCRIPTION);
87MODULE_VERSION(DRV_VERSION);
88MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
89MODULE_LICENSE("GPL");
90
91 /* module parameters */
92struct iwl_mod_params iwl3945_mod_params = {
93 .sw_crypto = 1,
94 .restart_fw = 1,
95 .disable_hw_scan = 1,
96 /* the rest are 0 by default */
97};
98
99/**
100 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
101 * @priv: eeprom and antenna fields are used to determine antenna flags
102 *
103 * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed
104 * iwl3945_mod_params.antenna specifies the antenna diversity mode:
105 *
106 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
107 * IWL_ANTENNA_MAIN - Force MAIN antenna
108 * IWL_ANTENNA_AUX - Force AUX antenna
109 */
110__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
111{
112 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
113
114 switch (iwl3945_mod_params.antenna) {
115 case IWL_ANTENNA_DIVERSITY:
116 return 0;
117
118 case IWL_ANTENNA_MAIN:
119 if (eeprom->antenna_switch_type)
120 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
121 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
122
123 case IWL_ANTENNA_AUX:
124 if (eeprom->antenna_switch_type)
125 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
126 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
127 }
128
129 /* bad antenna selector value */
130 IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
131 iwl3945_mod_params.antenna);
132
133 return 0; /* "diversity" is default if error */
134}
135
/*
 * Install a dynamic CCMP (AES) pairwise/group key into the uCode
 * station table entry for @sta_id.
 *
 * Copies the key material into both the driver-side keyinfo cache and
 * the ADD_STA command image under sta_lock, then pushes the updated
 * station entry to the uCode with CMD_ASYNC.
 *
 * Returns the result of iwl_legacy_send_add_sta() (0 when the async
 * command was queued successfully).
 */
static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	int ret;

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);

	/* Group keys are installed on the broadcast station entry */
	if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* Ask mac80211 to generate the CCMP IV/PN in the frame header */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = keyconf->keyidx;
	key_flags &= ~STA_KEY_FLG_INVALID;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
							keyconf->keylen);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
							keyconf->keylen);

	/* Claim a fresh uCode key slot only if this station has no
	 * encryption key installed yet */
	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
				 iwl_legacy_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");

	/* CMD_ASYNC: command is queued, not waited for, under sta_lock */
	ret = iwl_legacy_send_add_sta(priv,
				&priv->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
}
186
/*
 * TKIP hardware crypto is not implemented for the 3945; returning
 * -EOPNOTSUPP tells the caller the key cannot be offloaded.
 */
static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	return -EOPNOTSUPP;
}
193
/*
 * Dynamic (per-station) WEP hardware crypto is not implemented for the
 * 3945; returning -EOPNOTSUPP tells the caller the key cannot be
 * offloaded.
 */
static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	return -EOPNOTSUPP;
}
200
/*
 * Wipe all key state for station @sta_id, both in the driver cache and
 * in the uCode station table.
 *
 * A snapshot of the ADD_STA command is taken under sta_lock and sent
 * synchronously after the lock is dropped, so the uCode sees a
 * consistent image without sleeping inside the spinlock.
 *
 * NOTE(review): the uCode key struct is zeroed with
 * sizeof(struct iwl4965_keyinfo) — presumably the 3945 shares that
 * layout; confirm against the command definitions.
 */
static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
{
	unsigned long flags;
	struct iwl_legacy_addsta_cmd sta_cmd;

	spin_lock_irqsave(&priv->sta_lock, flags);
	memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
	memset(&priv->stations[sta_id].sta.key, 0,
		sizeof(struct iwl4965_keyinfo));
	priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* copy out while locked; send after unlock (CMD_SYNC may sleep) */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
219
220static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
221 struct ieee80211_key_conf *keyconf, u8 sta_id)
222{
223 int ret = 0;
224
225 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
226
227 switch (keyconf->cipher) {
228 case WLAN_CIPHER_SUITE_CCMP:
229 ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
230 break;
231 case WLAN_CIPHER_SUITE_TKIP:
232 ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
233 break;
234 case WLAN_CIPHER_SUITE_WEP40:
235 case WLAN_CIPHER_SUITE_WEP104:
236 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
237 break;
238 default:
239 IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
240 keyconf->cipher);
241 ret = -EINVAL;
242 }
243
244 IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
245 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
246 sta_id, ret);
247
248 return ret;
249}
250
/*
 * Remove a static (default/global WEP) key.
 *
 * The 3945 driver never installs static keys (see
 * iwl3945_set_static_key), so there is never anything to remove.
 *
 * @priv: driver context (unused)
 *
 * Returns -EOPNOTSUPP unconditionally.
 */
static int iwl3945_remove_static_key(struct iwl_priv *priv)
{
	/* was: int ret = -EOPNOTSUPP; return ret; — pointless local */
	return -EOPNOTSUPP;
}
257
258static int iwl3945_set_static_key(struct iwl_priv *priv,
259 struct ieee80211_key_conf *key)
260{
261 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
262 key->cipher == WLAN_CIPHER_SUITE_WEP104)
263 return -EOPNOTSUPP;
264
265 IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
266 return -EINVAL;
267}
268
269static void iwl3945_clear_free_frames(struct iwl_priv *priv)
270{
271 struct list_head *element;
272
273 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
274 priv->frames_count);
275
276 while (!list_empty(&priv->free_frames)) {
277 element = priv->free_frames.next;
278 list_del(element);
279 kfree(list_entry(element, struct iwl3945_frame, list));
280 priv->frames_count--;
281 }
282
283 if (priv->frames_count) {
284 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
285 priv->frames_count);
286 priv->frames_count = 0;
287 }
288}
289
290static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
291{
292 struct iwl3945_frame *frame;
293 struct list_head *element;
294 if (list_empty(&priv->free_frames)) {
295 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
296 if (!frame) {
297 IWL_ERR(priv, "Could not allocate frame!\n");
298 return NULL;
299 }
300
301 priv->frames_count++;
302 return frame;
303 }
304
305 element = priv->free_frames.next;
306 list_del(element);
307 return list_entry(element, struct iwl3945_frame, list);
308}
309
/*
 * Return a frame buffer to the free_frames pool. The buffer is zeroed
 * so stale command contents cannot leak into the next user.
 */
static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}
315
316unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
317 struct ieee80211_hdr *hdr,
318 int left)
319{
320
321 if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
322 return 0;
323
324 if (priv->beacon_skb->len > left)
325 return 0;
326
327 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
328
329 return priv->beacon_skb->len;
330}
331
332static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
333{
334 struct iwl3945_frame *frame;
335 unsigned int frame_size;
336 int rc;
337 u8 rate;
338
339 frame = iwl3945_get_free_frame(priv);
340
341 if (!frame) {
342 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
343 "command.\n");
344 return -ENOMEM;
345 }
346
347 rate = iwl_legacy_get_lowest_plcp(priv,
348 &priv->contexts[IWL_RXON_CTX_BSS]);
349
350 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
351
352 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
353 &frame->u.cmd[0]);
354
355 iwl3945_free_frame(priv, frame);
356
357 return rc;
358}
359
360static void iwl3945_unset_hw_params(struct iwl_priv *priv)
361{
362 if (priv->_3945.shared_virt)
363 dma_free_coherent(&priv->pci_dev->dev,
364 sizeof(struct iwl3945_shared),
365 priv->_3945.shared_virt,
366 priv->_3945.shared_phys);
367}
368
/*
 * Fill the security fields of a 3945 TX command from the cached key
 * info of the destination station.
 *
 * CCMP: key copied verbatim, hardware does AES. WEP: key copied at
 * offset 3 of the key field with the key index encoded in sec_ctl
 * (WEP104 additionally sets the 128-bit-key flag via fallthrough).
 * TKIP: left to software — sec_ctl stays 0.
 */
static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_device_cmd *cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
	struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;

	tx_cmd->sec_ctl = 0;

	switch (keyinfo->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		/* TKIP is done in software on 3945 — nothing to program */
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
		    (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;

		/* WEP key material starts at byte 3 of the key field */
		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", info->control.hw_key->hw_key_idx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
		break;
	}
}
408
409/*
410 * handle build REPLY_TX command notification.
411 */
412static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
413 struct iwl_device_cmd *cmd,
414 struct ieee80211_tx_info *info,
415 struct ieee80211_hdr *hdr, u8 std_id)
416{
417 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
418 __le32 tx_flags = tx_cmd->tx_flags;
419 __le16 fc = hdr->frame_control;
420
421 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
422 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
423 tx_flags |= TX_CMD_FLG_ACK_MSK;
424 if (ieee80211_is_mgmt(fc))
425 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
426 if (ieee80211_is_probe_resp(fc) &&
427 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
428 tx_flags |= TX_CMD_FLG_TSF_MSK;
429 } else {
430 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
431 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
432 }
433
434 tx_cmd->sta_id = std_id;
435 if (ieee80211_has_morefrags(fc))
436 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
437
438 if (ieee80211_is_data_qos(fc)) {
439 u8 *qc = ieee80211_get_qos_ctl(hdr);
440 tx_cmd->tid_tspec = qc[0] & 0xf;
441 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
442 } else {
443 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
444 }
445
446 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
447
448 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
449 if (ieee80211_is_mgmt(fc)) {
450 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
451 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
452 else
453 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
454 } else {
455 tx_cmd->timeout.pm_frame_timeout = 0;
456 }
457
458 tx_cmd->driver_txop = 0;
459 tx_cmd->tx_flags = tx_flags;
460 tx_cmd->next_frame_len = 0;
461}
462
463/*
464 * start REPLY_TX command process
465 */
466static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
467{
468 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
469 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
470 struct iwl3945_tx_cmd *tx_cmd;
471 struct iwl_tx_queue *txq = NULL;
472 struct iwl_queue *q = NULL;
473 struct iwl_device_cmd *out_cmd;
474 struct iwl_cmd_meta *out_meta;
475 dma_addr_t phys_addr;
476 dma_addr_t txcmd_phys;
477 int txq_id = skb_get_queue_mapping(skb);
478 u16 len, idx, hdr_len;
479 u8 id;
480 u8 unicast;
481 u8 sta_id;
482 u8 tid = 0;
483 __le16 fc;
484 u8 wait_write_ptr = 0;
485 unsigned long flags;
486
487 spin_lock_irqsave(&priv->lock, flags);
488 if (iwl_legacy_is_rfkill(priv)) {
489 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
490 goto drop_unlock;
491 }
492
493 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
494 IWL_ERR(priv, "ERROR: No TX rate available.\n");
495 goto drop_unlock;
496 }
497
498 unicast = !is_multicast_ether_addr(hdr->addr1);
499 id = 0;
500
501 fc = hdr->frame_control;
502
503#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
504 if (ieee80211_is_auth(fc))
505 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
506 else if (ieee80211_is_assoc_req(fc))
507 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
508 else if (ieee80211_is_reassoc_req(fc))
509 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
510#endif
511
512 spin_unlock_irqrestore(&priv->lock, flags);
513
514 hdr_len = ieee80211_hdrlen(fc);
515
516 /* Find index into station table for destination station */
517 sta_id = iwl_legacy_sta_id_or_broadcast(
518 priv, &priv->contexts[IWL_RXON_CTX_BSS],
519 info->control.sta);
520 if (sta_id == IWL_INVALID_STATION) {
521 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
522 hdr->addr1);
523 goto drop;
524 }
525
526 IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
527
528 if (ieee80211_is_data_qos(fc)) {
529 u8 *qc = ieee80211_get_qos_ctl(hdr);
530 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
531 if (unlikely(tid >= MAX_TID_COUNT))
532 goto drop;
533 }
534
535 /* Descriptor for chosen Tx queue */
536 txq = &priv->txq[txq_id];
537 q = &txq->q;
538
539 if ((iwl_legacy_queue_space(q) < q->high_mark))
540 goto drop;
541
542 spin_lock_irqsave(&priv->lock, flags);
543
544 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
545
546 /* Set up driver data for this TFD */
547 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
548 txq->txb[q->write_ptr].skb = skb;
549 txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
550
551 /* Init first empty entry in queue's array of Tx/cmd buffers */
552 out_cmd = txq->cmd[idx];
553 out_meta = &txq->meta[idx];
554 tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
555 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
556 memset(tx_cmd, 0, sizeof(*tx_cmd));
557
558 /*
559 * Set up the Tx-command (not MAC!) header.
560 * Store the chosen Tx queue and TFD index within the sequence field;
561 * after Tx, uCode's Tx response will return this value so driver can
562 * locate the frame within the tx queue and do post-tx processing.
563 */
564 out_cmd->hdr.cmd = REPLY_TX;
565 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
566 INDEX_TO_SEQ(q->write_ptr)));
567
568 /* Copy MAC header from skb into command buffer */
569 memcpy(tx_cmd->hdr, hdr, hdr_len);
570
571
572 if (info->control.hw_key)
573 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
574
575 /* TODO need this for burst mode later on */
576 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
577
578 /* set is_hcca to 0; it probably will never be implemented */
579 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
580
581 /* Total # bytes to be transmitted */
582 len = (u16)skb->len;
583 tx_cmd->len = cpu_to_le16(len);
584
585 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
586 iwl_legacy_update_stats(priv, true, fc, len);
587 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
588 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
589
590 if (!ieee80211_has_morefrags(hdr->frame_control)) {
591 txq->need_update = 1;
592 } else {
593 wait_write_ptr = 1;
594 txq->need_update = 0;
595 }
596
597 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
598 le16_to_cpu(out_cmd->hdr.sequence));
599 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
600 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
601 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
602 ieee80211_hdrlen(fc));
603
604 /*
605 * Use the first empty entry in this queue's command buffer array
606 * to contain the Tx command and MAC header concatenated together
607 * (payload data will be in another buffer).
608 * Size of this varies, due to varying MAC header length.
609 * If end is not dword aligned, we'll have 2 extra bytes at the end
610 * of the MAC header (device reads on dword boundaries).
611 * We'll tell device about this padding later.
612 */
613 len = sizeof(struct iwl3945_tx_cmd) +
614 sizeof(struct iwl_cmd_header) + hdr_len;
615 len = (len + 3) & ~3;
616
617 /* Physical address of this Tx command's header (not MAC header!),
618 * within command buffer array. */
619 txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
620 len, PCI_DMA_TODEVICE);
621 /* we do not map meta data ... so we can safely access address to
622 * provide to unmap command*/
623 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
624 dma_unmap_len_set(out_meta, len, len);
625
626 /* Add buffer containing Tx command and MAC(!) header to TFD's
627 * first entry */
628 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
629 txcmd_phys, len, 1, 0);
630
631
632 /* Set up TFD's 2nd entry to point directly to remainder of skb,
633 * if any (802.11 null frames have no payload). */
634 len = skb->len - hdr_len;
635 if (len) {
636 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
637 len, PCI_DMA_TODEVICE);
638 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
639 phys_addr, len,
640 0, U32_PAD(len));
641 }
642
643
644 /* Tell device the write index *just past* this latest filled TFD */
645 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
646 iwl_legacy_txq_update_write_ptr(priv, txq);
647 spin_unlock_irqrestore(&priv->lock, flags);
648
649 if ((iwl_legacy_queue_space(q) < q->high_mark)
650 && priv->mac80211_registered) {
651 if (wait_write_ptr) {
652 spin_lock_irqsave(&priv->lock, flags);
653 txq->need_update = 1;
654 iwl_legacy_txq_update_write_ptr(priv, txq);
655 spin_unlock_irqrestore(&priv->lock, flags);
656 }
657
658 iwl_legacy_stop_queue(priv, txq);
659 }
660
661 return 0;
662
663drop_unlock:
664 spin_unlock_irqrestore(&priv->lock, flags);
665drop:
666 return -1;
667}
668
/*
 * Send a REPLY_SPECTRUM_MEASUREMENT_CMD for one channel and interpret
 * the uCode response, updating priv->measurement_status.
 *
 * Returns 0 when the measurement was accepted, -EAGAIN when the uCode
 * declined it, -EIO on a failed command, or the error from command
 * submission.
 *
 * NOTE(review): when IWL_CMD_FAILED_MSK is set, rc is set to -EIO but
 * execution still falls through to the status switch, which may
 * overwrite rc with 0 or -EAGAIN — confirm this is intended.
 */
static int iwl3945_get_measurement(struct iwl_priv *priv,
			       struct ieee80211_measurement_params *params,
			       u8 type)
{
	struct iwl_spectrum_cmd spectrum;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = REPLY_SPECTRUM_MEASUREMENT_CMD,
		.data = (void *)&spectrum,
		.flags = CMD_WANT_SKB,
	};
	u32 add_time = le64_to_cpu(params->start_time);
	int rc;
	int spectrum_resp_status;
	int duration = le16_to_cpu(params->duration);
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	/* When associated, express the start time in beacon intervals
	 * relative to the last TSF we saw */
	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
		add_time = iwl_legacy_usecs_to_beacons(priv,
			le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
			le16_to_cpu(ctx->timing.beacon_interval));

	memset(&spectrum, 0, sizeof(spectrum));

	spectrum.channel_count = cpu_to_le16(1);
	spectrum.flags =
	    RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
	spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
	cmd.len = sizeof(spectrum);
	spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));

	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
		spectrum.start_time =
			iwl_legacy_add_beacon_time(priv,
				priv->_3945.last_beacon_time, add_time,
				le16_to_cpu(ctx->timing.beacon_interval));
	else
		spectrum.start_time = 0;

	spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
	spectrum.channels[0].channel = params->channel;
	spectrum.channels[0].type = type;
	if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
		spectrum.flags |= RXON_FLG_BAND_24G_MSK |
		    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;

	rc = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (rc)
		return rc;

	pkt = (struct iwl_rx_packet *)cmd.reply_page;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
		rc = -EIO;
	}

	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
	switch (spectrum_resp_status) {
	case 0:		/* Command will be handled */
		if (pkt->u.spectrum.id != 0xff) {
			IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
						pkt->u.spectrum.id);
			priv->measurement_status &= ~MEASUREMENT_READY;
		}
		priv->measurement_status |= MEASUREMENT_ACTIVE;
		rc = 0;
		break;

	case 1:		/* Command will not be handled */
		rc = -EAGAIN;
		break;
	}

	iwl_legacy_free_pages(priv, cmd.reply_page);

	return rc;
}
746
/*
 * Handle the uCode REPLY_ALIVE notification: stash the alive response
 * (init vs. runtime image) and schedule the matching start work.
 */
static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
		memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
		       sizeof(struct iwl_alive_resp));
		pwork = &priv->init_alive_start;
	} else {
		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
		memcpy(&priv->card_alive, &pkt->u.alive_frame,
		       sizeof(struct iwl_alive_resp));
		pwork = &priv->alive_start;
		iwl3945_disable_events(priv);
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(priv->workqueue, pwork,
				   msecs_to_jiffies(5));
	else
		IWL_WARN(priv, "uCode did not respond OK.\n");
}
782
/*
 * Debug-only handler for REPLY_ADD_STA responses: just logs the status.
 * pkt is declared under the #ifdef because IWL_DEBUG_RX compiles away
 * when CONFIG_IWLWIFI_LEGACY_DEBUG is off, which would leave pkt unused.
 */
static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
				 struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
#endif

	IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
}
792
/*
 * Handle a BEACON_NOTIFICATION from the uCode: record the IBSS manager
 * status (and log the beacon TX status when debugging is enabled).
 */
static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u8 rate = beacon->beacon_notify_hdr.rate;

	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

}
813
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* snapshot of priv->status taken BEFORE we update the rfkill bit,
	 * so we can detect a state change below */
	unsigned long status = priv->status;

	IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On");

	/* Block further host commands while the kill state settles */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);


	iwl_legacy_scan_cancel(priv);

	/* Only notify rfkill on a transition; otherwise wake any waiter
	 * blocked on the command queue */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
				test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up(&priv->wait_command_queue);
}
845
846/**
847 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
848 *
849 * Setup the RX handlers for each of the reply types sent from the uCode
850 * to the host.
851 *
852 * This function chains into the hardware specific files for them to setup
853 * any hardware specific handlers as well.
854 */
855static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
856{
857 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
858 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
859 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
860 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
861 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
862 iwl_legacy_rx_spectrum_measure_notif;
863 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
864 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
865 iwl_legacy_rx_pm_debug_statistics_notif;
866 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
867
868 /*
869 * The same handler is used for both the REPLY to a discrete
870 * statistics request from the host as well as for the periodic
871 * statistics notifications (after received beacons) from the uCode.
872 */
873 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
874 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
875
876 iwl_legacy_setup_rx_scan_handlers(priv);
877 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
878
879 /* Set up hardware specific Rx handlers */
880 iwl3945_hw_rx_handler_setup(priv);
881}
882
883/************************** RX-FUNCTIONS ****************************/
884/*
885 * Rx theory of operation
886 *
887 * The host allocates 32 DMA target addresses and passes the host address
888 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
889 * 0 to 31
890 *
891 * Rx Queue Indexes
892 * The host/firmware share two index registers for managing the Rx buffers.
893 *
894 * The READ index maps to the first position that the firmware may be writing
895 * to -- the driver can read up to (but not including) this position and get
896 * good data.
897 * The READ index is managed by the firmware once the card is enabled.
898 *
899 * The WRITE index maps to the last position the driver has read from -- the
900 * position preceding WRITE is the last slot the firmware can place a packet.
901 *
902 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
903 * WRITE = READ.
904 *
905 * During initialization, the host sets up the READ queue position to the first
906 * INDEX position, and WRITE to the last (READ - 1 wrapped)
907 *
908 * When the firmware places a packet in a buffer, it will advance the READ index
909 * and fire the RX interrupt. The driver can then query the READ index and
910 * process as many packets as possible, moving the WRITE index forward as it
911 * resets the Rx queue buffers with new memory.
912 *
913 * The management in the driver is as follows:
914 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
915 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
916 * to replenish the iwl->rxq->rx_free.
917 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
918 * iwl->rxq is replenished and the READ INDEX is updated (updating the
919 * 'processed' and 'read' driver indexes as well)
920 * + A received packet is processed and handed to the kernel network stack,
921 * detached from the iwl->rxq. The driver 'processed' index is updated.
922 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
923 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
924 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
925 * were enough free buffers and RX_STALLED is set it is cleared.
926 *
927 *
928 * Driver sequence:
929 *
930 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
931 * iwl3945_rx_queue_restock
932 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
933 * queue, updates firmware pointers, and updates
934 * the WRITE index. If insufficient rx_free buffers
935 * are available, schedules iwl3945_rx_replenish
936 *
937 * -- enable interrupts --
938 * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
939 * READ INDEX, detaching the SKB from the pool.
940 * Moves the packet buffer from queue to rx_used.
941 * Calls iwl3945_rx_queue_restock to refill any empty
942 * slots.
943 * ...
944 *
945 */
946
947/**
948 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
949 */
950static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
951 dma_addr_t dma_addr)
952{
953 return cpu_to_le32((u32)dma_addr);
954}
955
956/**
957 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
958 *
959 * If there are slots in the RX queue that need to be restocked,
960 * and we have free pre-allocated buffers, fill the ranks as much
961 * as we can, pulling from rx_free.
962 *
963 * This moves the 'write' index forward to catch up with 'processed', and
964 * also updates the memory address in the firmware to reference the new
965 * target buffer.
966 */
967static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
968{
969 struct iwl_rx_queue *rxq = &priv->rxq;
970 struct list_head *element;
971 struct iwl_rx_mem_buffer *rxb;
972 unsigned long flags;
973 int write;
974
975 spin_lock_irqsave(&rxq->lock, flags);
976 write = rxq->write & ~0x7;
977 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
978 /* Get next free Rx buffer, remove from free list */
979 element = rxq->rx_free.next;
980 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
981 list_del(element);
982
983 /* Point to Rx buffer via next RBD in circular buffer */
984 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
985 rxq->queue[rxq->write] = rxb;
986 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
987 rxq->free_count--;
988 }
989 spin_unlock_irqrestore(&rxq->lock, flags);
990 /* If the pre-allocated buffer pool is dropping low, schedule to
991 * refill it */
992 if (rxq->free_count <= RX_LOW_WATERMARK)
993 queue_work(priv->workqueue, &priv->rx_replenish);
994
995
996 /* If we've added more space for the firmware to place data, tell it.
997 * Increment device's write pointer in multiples of 8. */
998 if ((rxq->write_actual != (rxq->write & ~0x7))
999 || (abs(rxq->write - rxq->read) > 7)) {
1000 spin_lock_irqsave(&rxq->lock, flags);
1001 rxq->need_update = 1;
1002 spin_unlock_irqrestore(&rxq->lock, flags);
1003 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
1004 }
1005}
1006
1007/**
1008 * iwl3945_rx_replenish - Move all used packet from rx_used to rx_free
1009 *
1010 * When moving to rx_free an SKB is allocated for the slot.
1011 *
1012 * Also restock the Rx queue via iwl3945_rx_queue_restock.
1013 * This is called as a scheduled work item (except for during initialization)
1014 */
1015static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1016{
1017 struct iwl_rx_queue *rxq = &priv->rxq;
1018 struct list_head *element;
1019 struct iwl_rx_mem_buffer *rxb;
1020 struct page *page;
1021 unsigned long flags;
1022 gfp_t gfp_mask = priority;
1023
1024 while (1) {
1025 spin_lock_irqsave(&rxq->lock, flags);
1026
1027 if (list_empty(&rxq->rx_used)) {
1028 spin_unlock_irqrestore(&rxq->lock, flags);
1029 return;
1030 }
1031 spin_unlock_irqrestore(&rxq->lock, flags);
1032
1033 if (rxq->free_count > RX_LOW_WATERMARK)
1034 gfp_mask |= __GFP_NOWARN;
1035
1036 if (priv->hw_params.rx_page_order > 0)
1037 gfp_mask |= __GFP_COMP;
1038
1039 /* Alloc a new receive buffer */
1040 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
1041 if (!page) {
1042 if (net_ratelimit())
1043 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1044 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
1045 net_ratelimit())
1046 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
1047 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
1048 rxq->free_count);
1049 /* We don't reschedule replenish work here -- we will
1050 * call the restock method and if it still needs
1051 * more buffers it will schedule replenish */
1052 break;
1053 }
1054
1055 spin_lock_irqsave(&rxq->lock, flags);
1056 if (list_empty(&rxq->rx_used)) {
1057 spin_unlock_irqrestore(&rxq->lock, flags);
1058 __free_pages(page, priv->hw_params.rx_page_order);
1059 return;
1060 }
1061 element = rxq->rx_used.next;
1062 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
1063 list_del(element);
1064 spin_unlock_irqrestore(&rxq->lock, flags);
1065
1066 rxb->page = page;
1067 /* Get physical address of RB/SKB */
1068 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1069 PAGE_SIZE << priv->hw_params.rx_page_order,
1070 PCI_DMA_FROMDEVICE);
1071
1072 spin_lock_irqsave(&rxq->lock, flags);
1073
1074 list_add_tail(&rxb->list, &rxq->rx_free);
1075 rxq->free_count++;
1076 priv->alloc_rxb_page++;
1077
1078 spin_unlock_irqrestore(&rxq->lock, flags);
1079 }
1080}
1081
/*
 * Reset the RX queue: unmap and free any pages still attached to pool
 * entries, park every pool buffer on rx_used, and zero the indexes so
 * the queue reads as "all buffers used, none stocked".
 */
void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
1110
/*
 * Replenish worker (process context, may sleep): allocate fresh RX
 * pages with GFP_KERNEL, then restock the device queue under
 * priv->lock. @data is the struct iwl_priv pointer (work callback
 * signature).
 */
void iwl3945_rx_replenish(void *data)
{
	struct iwl_priv *priv = data;
	unsigned long flags;

	iwl3945_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwl3945_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
1122
/*
 * Atomic-context variant of the replenish path (GFP_ATOMIC
 * allocations). NOTE(review): unlike iwl3945_rx_replenish(), restock
 * is called here without taking priv->lock — presumably the caller's
 * context makes that safe; confirm against the RX tasklet.
 */
static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
{
	iwl3945_rx_allocate(priv, GFP_ATOMIC);

	iwl3945_rx_queue_restock(priv);
}
1129
1130
/* Assumes that the page field of the buffers in 'pool' is kept accurate.
 * If a page has been detached, the pool entry must have its page pointer
 * set to NULL. This free routine walks the list of pool entries and, for
 * each non-NULL page pointer, unmaps and frees the page.
 */
/*
 * Free all RX queue resources: every still-attached receive page in
 * the pool, the RBD circular buffer, and the read-buffer status area.
 */
static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	/* each RBD is a 4-byte (__le32) bus address, hence 4 * queue size */
	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
1156
1157
1158/* Convert linear signal-to-noise ratio into dB */
1159static u8 ratio2dB[100] = {
1160/* 0 1 2 3 4 5 6 7 8 9 */
1161 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
1162 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
1163 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
1164 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
1165 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
1166 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
1167 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
1168 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
1169 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
1170 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
1171};
1172
1173/* Calculates a relative dB value from a ratio of linear
1174 * (i.e. not dB) signal levels.
1175 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
1176int iwl3945_calc_db_from_ratio(int sig_ratio)
1177{
1178 /* 1000:1 or higher just report as 60 dB */
1179 if (sig_ratio >= 1000)
1180 return 60;
1181
1182 /* 100:1 or higher, divide by 10 and use table,
1183 * add 20 dB to make up for divide by 10 */
1184 if (sig_ratio >= 100)
1185 return 20 + (int)ratio2dB[sig_ratio/10];
1186
1187 /* We shouldn't see this */
1188 if (sig_ratio < 1)
1189 return 0;
1190
1191 /* Use table for ratios 1:1 - 99:1 */
1192 return (int)ratio2dB[sig_ratio];
1193}
1194
1195/**
1196 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
1197 *
1198 * Uses the priv->rx_handlers callback function array to invoke
1199 * the appropriate handlers, including command responses,
1200 * frame-received notifications, and other notifications.
1201 */
1202static void iwl3945_rx_handle(struct iwl_priv *priv)
1203{
1204 struct iwl_rx_mem_buffer *rxb;
1205 struct iwl_rx_packet *pkt;
1206 struct iwl_rx_queue *rxq = &priv->rxq;
1207 u32 r, i;
1208 int reclaim;
1209 unsigned long flags;
1210 u8 fill_rx = 0;
1211 u32 count = 8;
1212 int total_empty = 0;
1213
1214 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1215 * buffer that the driver may process (last buffer filled by ucode). */
1216 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1217 i = rxq->read;
1218
1219 /* calculate total frames need to be restock after handling RX */
1220 total_empty = r - rxq->write_actual;
1221 if (total_empty < 0)
1222 total_empty += RX_QUEUE_SIZE;
1223
1224 if (total_empty > (RX_QUEUE_SIZE / 2))
1225 fill_rx = 1;
1226 /* Rx interrupt, but nothing sent from uCode */
1227 if (i == r)
1228 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
1229
1230 while (i != r) {
1231 int len;
1232
1233 rxb = rxq->queue[i];
1234
1235 /* If an RXB doesn't have a Rx queue slot associated with it,
1236 * then a bug has been introduced in the queue refilling
1237 * routines -- catch it here */
1238 BUG_ON(rxb == NULL);
1239
1240 rxq->queue[i] = NULL;
1241
1242 pci_unmap_page(priv->pci_dev, rxb->page_dma,
1243 PAGE_SIZE << priv->hw_params.rx_page_order,
1244 PCI_DMA_FROMDEVICE);
1245 pkt = rxb_addr(rxb);
1246
1247 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1248 len += sizeof(u32); /* account for status word */
1249 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
1250
1251 /* Reclaim a command buffer only if this packet is a response
1252 * to a (driver-originated) command.
1253 * If the packet (e.g. Rx frame) originated from uCode,
1254 * there is no command buffer to reclaim.
1255 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1256 * but apparently a few don't get set; catch them here. */
1257 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1258 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
1259 (pkt->hdr.cmd != REPLY_TX);
1260
1261 /* Based on type of command response or notification,
1262 * handle those that need handling via function in
1263 * rx_handlers table. See iwl3945_setup_rx_handlers() */
1264 if (priv->rx_handlers[pkt->hdr.cmd]) {
1265 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1266 iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1267 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1268 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1269 } else {
1270 /* No handling needed */
1271 IWL_DEBUG_RX(priv,
1272 "r %d i %d No handler needed for %s, 0x%02x\n",
1273 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
1274 pkt->hdr.cmd);
1275 }
1276
1277 /*
1278 * XXX: After here, we should always check rxb->page
1279 * against NULL before touching it or its virtual
1280 * memory (pkt). Because some rx_handler might have
1281 * already taken or freed the pages.
1282 */
1283
1284 if (reclaim) {
1285 /* Invoke any callbacks, transfer the buffer to caller,
1286 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
1287 * as we reclaim the driver command queue */
1288 if (rxb->page)
1289 iwl_legacy_tx_cmd_complete(priv, rxb);
1290 else
1291 IWL_WARN(priv, "Claim null rxb?\n");
1292 }
1293
1294 /* Reuse the page if possible. For notification packets and
1295 * SKBs that fail to Rx correctly, add them back into the
1296 * rx_free list for reuse later. */
1297 spin_lock_irqsave(&rxq->lock, flags);
1298 if (rxb->page != NULL) {
1299 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
1300 0, PAGE_SIZE << priv->hw_params.rx_page_order,
1301 PCI_DMA_FROMDEVICE);
1302 list_add_tail(&rxb->list, &rxq->rx_free);
1303 rxq->free_count++;
1304 } else
1305 list_add_tail(&rxb->list, &rxq->rx_used);
1306
1307 spin_unlock_irqrestore(&rxq->lock, flags);
1308
1309 i = (i + 1) & RX_QUEUE_MASK;
1310 /* If there are a lot of unused frames,
1311 * restock the Rx queue so ucode won't assert. */
1312 if (fill_rx) {
1313 count++;
1314 if (count >= 8) {
1315 rxq->read = i;
1316 iwl3945_rx_replenish_now(priv);
1317 count = 0;
1318 }
1319 }
1320 }
1321
1322 /* Backtrack one entry */
1323 rxq->read = i;
1324 if (fill_rx)
1325 iwl3945_rx_replenish_now(priv);
1326 else
1327 iwl3945_rx_queue_restock(priv);
1328}
1329
/* call this function to flush any scheduled tasklet */
static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(priv->pci_dev->irq);
	/* the ISR may have scheduled the tasklet just before being
	 * quiesced; kill it so no bottom-half work runs after this */
	tasklet_kill(&priv->irq_tasklet);
}
1337
/* Translate a uCode error-log type code (1..6) into a human-readable
 * name; anything outside that range maps to "UNKNOWN". */
static const char *iwl3945_desc_lookup(int i)
{
	static const char * const err_names[] = {
		"FAIL",			/* 1 */
		"BAD_PARAM",		/* 2 */
		"BAD_CHECKSUM",		/* 3 */
		"NMI_INTERRUPT",	/* 4 */
		"SYSASSERT",		/* 5 */
		"FATAL_ERROR",		/* 6 */
	};

	if (i >= 1 && i <= 6)
		return err_names[i - 1];

	return "UNKNOWN";
}
1357
/* Error-event table layout: word 0 is the record count, followed by
 * 7-word records starting at offset 1*sizeof(u32). */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))

/* Dump the uCode error-event table (in device SRAM, located via the
 * pointer reported in the "alive" response) to the kernel log and to
 * the tracing infrastructure. */
void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 i;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;

	base = le32_to_cpu(priv->card_alive.error_event_table_ptr);

	if (!iwl3945_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
		return;
	}


	/* first word of the table is the number of error records */
	count = iwl_legacy_read_targ_mem(priv, base);

	/* Effectively "count >= 1": print the header for non-empty tables */
	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
			priv->status, count);
	}

	IWL_ERR(priv, "Desc Time asrtPC blink2 "
		"ilink1 nmiPC Line\n");
	for (i = ERROR_START_OFFSET;
	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
	     i += ERROR_ELEM_SIZE) {
		/* one 7-word record: type, time, PCs/links and one data word */
		desc = iwl_legacy_read_targ_mem(priv, base + i);
		time =
		iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
		blink1 =
		iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
		blink2 =
		iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
		ilink1 =
		iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
		ilink2 =
		iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
		data1 =
		iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));

		IWL_ERR(priv,
			"%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
			iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
			ilink1, ilink2, data1);
		trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
			0, blink1, blink2, ilink1, ilink2);
	}
}
1410
/* Interrupt bottom half (tasklet/softirq context): ack all pending
 * CSR and flow-handler interrupt bits, then service each discovered
 * condition (HW error, SW error, wakeup, Rx, Tx).  Re-enables device
 * interrupts on exit unless a hardware error left them disabled. */
static void iwl3945_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			inta, inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR39_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR39_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_legacy_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_legacy_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Bail out here: interrupts stay disabled and the error
		 * handler takes care of recovery/restart. */
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				"the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			"Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_legacy_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		/* resync device write pointers for the Rx ring and all
		 * six Tx queues after the power-save wakeup */
		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);

		priv->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		iwl3945_rx_handle(priv);
		priv->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	if (inta & CSR_INT_BIT_FH_TX) {
		IWL_DEBUG_ISR(priv, "Tx interrupt\n");
		priv->isr_stats.tx++;

		/* ack FH status bit 6 and clear the service-channel credit
		 * register.  NOTE(review): presumably the handshake for the
		 * firmware-load DMA on the service channel -- verify against
		 * the FH39 register documentation. */
		iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
		iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
			(FH39_SRVC_CHNL), 0x0);
		handled |= CSR_INT_BIT_FH_TX;
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~priv->inta_mask) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			inta & ~priv->inta_mask);
		IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
1557
/* Fill scan_ch[] with one iwl3945_scan_channel entry for every channel
 * of the pending scan request that belongs to @band and is valid per
 * the driver's channel info.  Sets dwell times, passive/active type,
 * direct-probe bits and default tx power.  Returns the number of
 * entries written. */
static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
					 enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl3945_scan_channel *scan_ch,
				     struct ieee80211_vif *vif)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);

	/* passive dwell must always exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		if (chan->band != band)
			continue;

		scan_ch->channel = chan->hw_value;

		ch_info = iwl_legacy_get_channel_info(priv, band,
							scan_ch->channel);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_SCAN(priv,
				"Channel %d is INVALID for this band.\n",
				scan_ch->channel);
			continue;
		}

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* If passive , set up for auto-switch
		 * and use long active_dwell time.
		 */
		if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
			scan_ch->type = 0;	/* passive */
			/* uCode API v1 quirk: passive channels use the long
			 * (passive) dwell in the active_dwell field too */
			if (IWL_UCODE_API(priv->ucode_ver) == 1)
				scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
		} else {
			scan_ch->type = 1;	/* active */
		}

		/* Set direct probe bits. These may be used both for active
		 * scan channels (probes gets sent right away),
		 * or for passive channels (probes get se sent only after
		 * hearing clear Rx packet).*/
		if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
			if (n_probes)
				scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
		} else {
			/* uCode v1 does not allow setting direct probe bits on
			 * passive channel. */
			if ((scan_ch->type & 1) && n_probes)
				scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
		}

		/* Set txpower levels to defaults */
		scan_ch->tpc.dsp_atten = 110;
		/* scan_pwr_info->tpc.dsp_atten; */

		/*scan_pwr_info->tpc.tx_gain; */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else {
			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
			 * power level:
			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
			 */
		}

		IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
			scan_ch->channel,
			(scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
			(scan_ch->type & 1) ?
			active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}
1654
1655static void iwl3945_init_hw_rates(struct iwl_priv *priv,
1656 struct ieee80211_rate *rates)
1657{
1658 int i;
1659
1660 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
1661 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
1662 rates[i].hw_value = i; /* Rate scaling will work on indexes */
1663 rates[i].hw_value_short = i;
1664 rates[i].flags = 0;
1665 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
1666 /*
1667 * If CCK != 1M then set short preamble rate flag.
1668 */
1669 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
1670 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1671 }
1672 }
1673}
1674
1675/******************************************************************************
1676 *
1677 * uCode download functions
1678 *
1679 ******************************************************************************/
1680
/* Release every DMA buffer that may have been allocated for the
 * firmware images (runtime inst/data plus data backup, init inst/data,
 * bootstrap).  Presumably iwl_legacy_free_fw_desc() is a no-op for
 * descriptors that were never allocated -- verify in iwl-helpers. */
static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
{
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
}
1690
1691/**
1692 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
1693 * looking at all data.
1694 */
1695static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
1696{
1697 u32 val;
1698 u32 save_len = len;
1699 int rc = 0;
1700 u32 errcnt;
1701
1702 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1703
1704 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1705 IWL39_RTC_INST_LOWER_BOUND);
1706
1707 errcnt = 0;
1708 for (; len > 0; len -= sizeof(u32), image++) {
1709 /* read data comes through single port, auto-incr addr */
1710 /* NOTE: Use the debugless read so we don't flood kernel log
1711 * if IWL_DL_IO is set */
1712 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1713 if (val != le32_to_cpu(*image)) {
1714 IWL_ERR(priv, "uCode INST section is invalid at "
1715 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1716 save_len - len, val, le32_to_cpu(*image));
1717 rc = -EIO;
1718 errcnt++;
1719 if (errcnt >= 20)
1720 break;
1721 }
1722 }
1723
1724
1725 if (!errcnt)
1726 IWL_DEBUG_INFO(priv,
1727 "ucode image in INSTRUCTION memory is good\n");
1728
1729 return rc;
1730}
1731
1732
1733/**
1734 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
1735 * using sample data 100 bytes apart. If these sample points are good,
1736 * it's a pretty good bet that everything between them is good, too.
1737 */
1738static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
1739{
1740 u32 val;
1741 int rc = 0;
1742 u32 errcnt = 0;
1743 u32 i;
1744
1745 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1746
1747 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
1748 /* read data comes through single port, auto-incr addr */
1749 /* NOTE: Use the debugless read so we don't flood kernel log
1750 * if IWL_DL_IO is set */
1751 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1752 i + IWL39_RTC_INST_LOWER_BOUND);
1753 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1754 if (val != le32_to_cpu(*image)) {
1755#if 0 /* Enable this if you want to see details */
1756 IWL_ERR(priv, "uCode INST section is invalid at "
1757 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1758 i, val, *image);
1759#endif
1760 rc = -EIO;
1761 errcnt++;
1762 if (errcnt >= 3)
1763 break;
1764 }
1765 }
1766
1767 return rc;
1768}
1769
1770
1771/**
1772 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
1773 * and verify its contents
1774 */
1775static int iwl3945_verify_ucode(struct iwl_priv *priv)
1776{
1777 __le32 *image;
1778 u32 len;
1779 int rc = 0;
1780
1781 /* Try bootstrap */
1782 image = (__le32 *)priv->ucode_boot.v_addr;
1783 len = priv->ucode_boot.len;
1784 rc = iwl3945_verify_inst_sparse(priv, image, len);
1785 if (rc == 0) {
1786 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
1787 return 0;
1788 }
1789
1790 /* Try initialize */
1791 image = (__le32 *)priv->ucode_init.v_addr;
1792 len = priv->ucode_init.len;
1793 rc = iwl3945_verify_inst_sparse(priv, image, len);
1794 if (rc == 0) {
1795 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
1796 return 0;
1797 }
1798
1799 /* Try runtime/protocol */
1800 image = (__le32 *)priv->ucode_code.v_addr;
1801 len = priv->ucode_code.len;
1802 rc = iwl3945_verify_inst_sparse(priv, image, len);
1803 if (rc == 0) {
1804 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
1805 return 0;
1806 }
1807
1808 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1809
1810 /* Since nothing seems to match, show first several data entries in
1811 * instruction SRAM, so maybe visual inspection will give a clue.
1812 * Selection of bootstrap image (vs. other images) is arbitrary. */
1813 image = (__le32 *)priv->ucode_boot.v_addr;
1814 len = priv->ucode_boot.len;
1815 rc = iwl3945_verify_inst_full(priv, image, len);
1816
1817 return rc;
1818}
1819
/* Clear all reset bits in CSR_RESET, letting the NIC run. */
static void iwl3945_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}
1825
/* Generate a trivial accessor that reads one little-endian field out of
 * the v1 ucode file header and returns it in CPU byte order. */
#define IWL3945_UCODE_GET(item) \
static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
{ \
	return le32_to_cpu(ucode->v1.item); \
}

/* 3945 only uses the v1 header layout -- 24 bytes (6 le32 words: ver
 * plus the five image sizes accessed below) -- regardless of api_ver. */
static u32 iwl3945_ucode_get_header_size(u32 api_ver)
{
	return 24;
}

/* Start of the image payload, immediately following the v1 header. */
static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
{
	return (u8 *) ucode->v1.data;
}

IWL3945_UCODE_GET(inst_size);
IWL3945_UCODE_GET(data_size);
IWL3945_UCODE_GET(init_size);
IWL3945_UCODE_GET(init_data_size);
IWL3945_UCODE_GET(boot_size);
1847
1848/**
1849 * iwl3945_read_ucode - Read uCode images from disk file.
1850 *
1851 * Copy into buffers for card to fetch via bus-mastering
1852 */
1853static int iwl3945_read_ucode(struct iwl_priv *priv)
1854{
1855 const struct iwl_ucode_header *ucode;
1856 int ret = -EINVAL, index;
1857 const struct firmware *ucode_raw;
1858 /* firmware file name contains uCode/driver compatibility version */
1859 const char *name_pre = priv->cfg->fw_name_pre;
1860 const unsigned int api_max = priv->cfg->ucode_api_max;
1861 const unsigned int api_min = priv->cfg->ucode_api_min;
1862 char buf[25];
1863 u8 *src;
1864 size_t len;
1865 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
1866
1867 /* Ask kernel firmware_class module to get the boot firmware off disk.
1868 * request_firmware() is synchronous, file is in memory on return. */
1869 for (index = api_max; index >= api_min; index--) {
1870 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
1871 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
1872 if (ret < 0) {
1873 IWL_ERR(priv, "%s firmware file req failed: %d\n",
1874 buf, ret);
1875 if (ret == -ENOENT)
1876 continue;
1877 else
1878 goto error;
1879 } else {
1880 if (index < api_max)
1881 IWL_ERR(priv, "Loaded firmware %s, "
1882 "which is deprecated. "
1883 " Please use API v%u instead.\n",
1884 buf, api_max);
1885 IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
1886 "(%zd bytes) from disk\n",
1887 buf, ucode_raw->size);
1888 break;
1889 }
1890 }
1891
1892 if (ret < 0)
1893 goto error;
1894
1895 /* Make sure that we got at least our header! */
1896 if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
1897 IWL_ERR(priv, "File size way too small!\n");
1898 ret = -EINVAL;
1899 goto err_release;
1900 }
1901
1902 /* Data from ucode file: header followed by uCode images */
1903 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1904
1905 priv->ucode_ver = le32_to_cpu(ucode->ver);
1906 api_ver = IWL_UCODE_API(priv->ucode_ver);
1907 inst_size = iwl3945_ucode_get_inst_size(ucode);
1908 data_size = iwl3945_ucode_get_data_size(ucode);
1909 init_size = iwl3945_ucode_get_init_size(ucode);
1910 init_data_size = iwl3945_ucode_get_init_data_size(ucode);
1911 boot_size = iwl3945_ucode_get_boot_size(ucode);
1912 src = iwl3945_ucode_get_data(ucode);
1913
1914 /* api_ver should match the api version forming part of the
1915 * firmware filename ... but we don't check for that and only rely
1916 * on the API version read from firmware header from here on forward */
1917
1918 if (api_ver < api_min || api_ver > api_max) {
1919 IWL_ERR(priv, "Driver unable to support your firmware API. "
1920 "Driver supports v%u, firmware is v%u.\n",
1921 api_max, api_ver);
1922 priv->ucode_ver = 0;
1923 ret = -EINVAL;
1924 goto err_release;
1925 }
1926 if (api_ver != api_max)
1927 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
1928 "got %u. New firmware can be obtained "
1929 "from http://www.intellinuxwireless.org.\n",
1930 api_max, api_ver);
1931
1932 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1933 IWL_UCODE_MAJOR(priv->ucode_ver),
1934 IWL_UCODE_MINOR(priv->ucode_ver),
1935 IWL_UCODE_API(priv->ucode_ver),
1936 IWL_UCODE_SERIAL(priv->ucode_ver));
1937
1938 snprintf(priv->hw->wiphy->fw_version,
1939 sizeof(priv->hw->wiphy->fw_version),
1940 "%u.%u.%u.%u",
1941 IWL_UCODE_MAJOR(priv->ucode_ver),
1942 IWL_UCODE_MINOR(priv->ucode_ver),
1943 IWL_UCODE_API(priv->ucode_ver),
1944 IWL_UCODE_SERIAL(priv->ucode_ver));
1945
1946 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1947 priv->ucode_ver);
1948 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
1949 inst_size);
1950 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
1951 data_size);
1952 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
1953 init_size);
1954 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
1955 init_data_size);
1956 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
1957 boot_size);
1958
1959
1960 /* Verify size of file vs. image size info in file's header */
1961 if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
1962 inst_size + data_size + init_size +
1963 init_data_size + boot_size) {
1964
1965 IWL_DEBUG_INFO(priv,
1966 "uCode file size %zd does not match expected size\n",
1967 ucode_raw->size);
1968 ret = -EINVAL;
1969 goto err_release;
1970 }
1971
1972 /* Verify that uCode images will fit in card's SRAM */
1973 if (inst_size > IWL39_MAX_INST_SIZE) {
1974 IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
1975 inst_size);
1976 ret = -EINVAL;
1977 goto err_release;
1978 }
1979
1980 if (data_size > IWL39_MAX_DATA_SIZE) {
1981 IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
1982 data_size);
1983 ret = -EINVAL;
1984 goto err_release;
1985 }
1986 if (init_size > IWL39_MAX_INST_SIZE) {
1987 IWL_DEBUG_INFO(priv,
1988 "uCode init instr len %d too large to fit in\n",
1989 init_size);
1990 ret = -EINVAL;
1991 goto err_release;
1992 }
1993 if (init_data_size > IWL39_MAX_DATA_SIZE) {
1994 IWL_DEBUG_INFO(priv,
1995 "uCode init data len %d too large to fit in\n",
1996 init_data_size);
1997 ret = -EINVAL;
1998 goto err_release;
1999 }
2000 if (boot_size > IWL39_MAX_BSM_SIZE) {
2001 IWL_DEBUG_INFO(priv,
2002 "uCode boot instr len %d too large to fit in\n",
2003 boot_size);
2004 ret = -EINVAL;
2005 goto err_release;
2006 }
2007
2008 /* Allocate ucode buffers for card's bus-master loading ... */
2009
2010 /* Runtime instructions and 2 copies of data:
2011 * 1) unmodified from disk
2012 * 2) backup cache for save/restore during power-downs */
2013 priv->ucode_code.len = inst_size;
2014 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
2015
2016 priv->ucode_data.len = data_size;
2017 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
2018
2019 priv->ucode_data_backup.len = data_size;
2020 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
2021
2022 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
2023 !priv->ucode_data_backup.v_addr)
2024 goto err_pci_alloc;
2025
2026 /* Initialization instructions and data */
2027 if (init_size && init_data_size) {
2028 priv->ucode_init.len = init_size;
2029 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
2030
2031 priv->ucode_init_data.len = init_data_size;
2032 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
2033
2034 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
2035 goto err_pci_alloc;
2036 }
2037
2038 /* Bootstrap (instructions only, no data) */
2039 if (boot_size) {
2040 priv->ucode_boot.len = boot_size;
2041 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
2042
2043 if (!priv->ucode_boot.v_addr)
2044 goto err_pci_alloc;
2045 }
2046
2047 /* Copy images into buffers for card's bus-master reads ... */
2048
2049 /* Runtime instructions (first block of data in file) */
2050 len = inst_size;
2051 IWL_DEBUG_INFO(priv,
2052 "Copying (but not loading) uCode instr len %zd\n", len);
2053 memcpy(priv->ucode_code.v_addr, src, len);
2054 src += len;
2055
2056 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2057 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
2058
2059 /* Runtime data (2nd block)
2060 * NOTE: Copy into backup buffer will be done in iwl3945_up() */
2061 len = data_size;
2062 IWL_DEBUG_INFO(priv,
2063 "Copying (but not loading) uCode data len %zd\n", len);
2064 memcpy(priv->ucode_data.v_addr, src, len);
2065 memcpy(priv->ucode_data_backup.v_addr, src, len);
2066 src += len;
2067
2068 /* Initialization instructions (3rd block) */
2069 if (init_size) {
2070 len = init_size;
2071 IWL_DEBUG_INFO(priv,
2072 "Copying (but not loading) init instr len %zd\n", len);
2073 memcpy(priv->ucode_init.v_addr, src, len);
2074 src += len;
2075 }
2076
2077 /* Initialization data (4th block) */
2078 if (init_data_size) {
2079 len = init_data_size;
2080 IWL_DEBUG_INFO(priv,
2081 "Copying (but not loading) init data len %zd\n", len);
2082 memcpy(priv->ucode_init_data.v_addr, src, len);
2083 src += len;
2084 }
2085
2086 /* Bootstrap instructions (5th block) */
2087 len = boot_size;
2088 IWL_DEBUG_INFO(priv,
2089 "Copying (but not loading) boot instr len %zd\n", len);
2090 memcpy(priv->ucode_boot.v_addr, src, len);
2091
2092 /* We have our copies now, allow OS release its copies */
2093 release_firmware(ucode_raw);
2094 return 0;
2095
2096 err_pci_alloc:
2097 IWL_ERR(priv, "failed to allocate pci memory\n");
2098 ret = -ENOMEM;
2099 iwl3945_dealloc_ucode_pci(priv);
2100
2101 err_release:
2102 release_firmware(ucode_raw);
2103
2104 error:
2105 return ret;
2106}
2107
2108
2109/**
2110 * iwl3945_set_ucode_ptrs - Set uCode address location
2111 *
2112 * Tell initialization uCode where to find runtime uCode.
2113 *
2114 * BSM registers initially contain pointers to initialization uCode.
2115 * We need to replace them to load runtime uCode inst and data,
2116 * and to save runtime data when powering down.
2117 */
2118static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
2119{
2120 dma_addr_t pinst;
2121 dma_addr_t pdata;
2122
2123 /* bits 31:0 for 3945 */
2124 pinst = priv->ucode_code.p_addr;
2125 pdata = priv->ucode_data_backup.p_addr;
2126
2127 /* Tell bootstrap uCode where to find image to load */
2128 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2129 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2130 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
2131 priv->ucode_data.len);
2132
2133 /* Inst byte count must be last to set up, bit 31 signals uCode
2134 * that all new ptr/size info is in place */
2135 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
2136 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
2137
2138 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
2139
2140 return 0;
2141}
2142
2143/**
2144 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
2145 *
2146 * Called after REPLY_ALIVE notification received from "initialize" uCode.
2147 *
2148 * Tell "initialize" uCode to go ahead and load the runtime uCode.
2149 */
2150static void iwl3945_init_alive_start(struct iwl_priv *priv)
2151{
2152 /* Check alive response for "valid" sign from uCode */
2153 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
2154 /* We had an error bringing up the hardware, so take it
2155 * all the way back down so we can try again */
2156 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
2157 goto restart;
2158 }
2159
2160 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
2161 * This is a paranoid check, because we would not have gotten the
2162 * "initialize" alive if code weren't properly loaded. */
2163 if (iwl3945_verify_ucode(priv)) {
2164 /* Runtime instruction load was bad;
2165 * take it all the way back down so we can try again */
2166 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
2167 goto restart;
2168 }
2169
2170 /* Send pointers to protocol/runtime uCode image ... init code will
2171 * load and launch runtime uCode, which will send us another "Alive"
2172 * notification. */
2173 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
2174 if (iwl3945_set_ucode_ptrs(priv)) {
2175 /* Runtime instruction load won't happen;
2176 * take it all the way back down so we can try again */
2177 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
2178 goto restart;
2179 }
2180 return;
2181
2182 restart:
2183 queue_work(priv->workqueue, &priv->restart);
2184}
2185
2186/**
2187 * iwl3945_alive_start - called after REPLY_ALIVE notification received
2188 * from protocol/runtime uCode (initialization uCode's
2189 * Alive gets handled by iwl3945_init_alive_start()).
2190 */
2191static void iwl3945_alive_start(struct iwl_priv *priv)
2192{
2193 int thermal_spin = 0;
2194 u32 rfkill;
2195 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2196
2197 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2198
2199 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2200 /* We had an error bringing up the hardware, so take it
2201 * all the way back down so we can try again */
2202 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2203 goto restart;
2204 }
2205
2206 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2207 * This is a paranoid check, because we would not have gotten the
2208 * "runtime" alive if code weren't properly loaded. */
2209 if (iwl3945_verify_ucode(priv)) {
2210 /* Runtime instruction load was bad;
2211 * take it all the way back down so we can try again */
2212 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
2213 goto restart;
2214 }
2215
2216 rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
2217 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
2218
2219 if (rfkill & 0x1) {
2220 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2221 /* if RFKILL is not on, then wait for thermal
2222 * sensor in adapter to kick in */
2223 while (iwl3945_hw_get_temperature(priv) == 0) {
2224 thermal_spin++;
2225 udelay(10);
2226 }
2227
2228 if (thermal_spin)
2229 IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
2230 thermal_spin * 10);
2231 } else
2232 set_bit(STATUS_RF_KILL_HW, &priv->status);
2233
2234 /* After the ALIVE response, we can send commands to 3945 uCode */
2235 set_bit(STATUS_ALIVE, &priv->status);
2236
2237 /* Enable watchdog to monitor the driver tx queues */
2238 iwl_legacy_setup_watchdog(priv);
2239
2240 if (iwl_legacy_is_rfkill(priv))
2241 return;
2242
2243 ieee80211_wake_queues(priv->hw);
2244
2245 priv->active_rate = IWL_RATES_MASK_3945;
2246
2247 iwl_legacy_power_update_mode(priv, true);
2248
2249 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2250 struct iwl3945_rxon_cmd *active_rxon =
2251 (struct iwl3945_rxon_cmd *)(&ctx->active);
2252
2253 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2254 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2255 } else {
2256 /* Initialize our rx_config data */
2257 iwl_legacy_connection_init_rx_config(priv, ctx);
2258 }
2259
2260 /* Configure Bluetooth device coexistence support */
2261 iwl_legacy_send_bt_config(priv);
2262
2263 set_bit(STATUS_READY, &priv->status);
2264
2265 /* Configure the adapter for unassociated operation */
2266 iwl3945_commit_rxon(priv, ctx);
2267
2268 iwl3945_reg_txpower_periodic(priv);
2269
2270 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2271 wake_up(&priv->wait_command_queue);
2272
2273 return;
2274
2275 restart:
2276 queue_work(priv->workqueue, &priv->restart);
2277}
2278
2279static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
2280
/*
 * __iwl3945_down - take the NIC all the way down
 *
 * Caller must hold priv->mutex (see iwl3945_down()).  Cancels scanning,
 * stops the watchdog, clears station state, stops TX/RX and DMA, and
 * puts the device in low power -- unless the driver was never brought
 * through init, in which case only the status word is reset.
 */
static void __iwl3945_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending;

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	iwl_legacy_scan_cancel_timeout(priv, 200);

	/* Remember whether EXIT_PENDING was already set so we only clear
	 * it below if we set it ourselves (module unload keeps it set) */
	exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&priv->watchdog);

	/* Station information will now be cleared in device */
	iwl_legacy_clear_ucode_stations(priv, NULL);
	iwl_legacy_dealloc_bcast_stations(priv);
	iwl_legacy_clear_driver_stations(priv);

	/* Unblock any waiting calls */
	wake_up_all(&priv->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl3945_synchronize_irq(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl3945_init() then
	 * clear all bits but the RF Kill bits and return */
	/* Note: "<<" binds tighter than "|", so each term below is
	 * (bit_value << BIT_POSITION) OR'ed together -- the expression
	 * rebuilds the status word from the listed bits only */
	if (!iwl_legacy_is_init(priv)) {
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
					STATUS_GEO_CONFIGURED |
			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
					STATUS_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	iwl3945_hw_txq_ctx_stop(priv);
	iwl3945_hw_rxq_stop(priv);

	/* Power-down device's busmaster DMA clocks */
	iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Stop the device, and put it in low power state */
	iwl_legacy_apm_stop(priv);

 exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	/* Drop any beacon frame we were holding for IBSS/AP mode */
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;

	/* clear out any free frames */
	iwl3945_clear_free_frames(priv);
}
2364
/*
 * iwl3945_down - locked wrapper around __iwl3945_down()
 *
 * Takes priv->mutex for the actual teardown, then cancels deferred
 * work *after* releasing it (the work handlers take the same mutex,
 * so cancelling while holding it could deadlock -- TODO confirm).
 */
static void iwl3945_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl3945_down(priv);
	mutex_unlock(&priv->mutex);

	iwl3945_cancel_deferred_work(priv);
}
2373
2374#define MAX_HW_RESTARTS 5
2375
2376static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
2377{
2378 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2379 unsigned long flags;
2380 u8 sta_id;
2381
2382 spin_lock_irqsave(&priv->sta_lock, flags);
2383 sta_id = iwl_legacy_prep_station(priv, ctx,
2384 iwlegacy_bcast_addr, false, NULL);
2385 if (sta_id == IWL_INVALID_STATION) {
2386 IWL_ERR(priv, "Unable to prepare broadcast station\n");
2387 spin_unlock_irqrestore(&priv->sta_lock, flags);
2388
2389 return -EINVAL;
2390 }
2391
2392 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
2393 priv->stations[sta_id].used |= IWL_STA_BCAST;
2394 spin_unlock_irqrestore(&priv->sta_lock, flags);
2395
2396 return 0;
2397}
2398
/*
 * __iwl3945_up - bring the NIC up and start the bootstrap uCode
 *
 * Caller must hold priv->mutex.  Allocates the broadcast station,
 * checks RF-kill, initializes the NIC, then tries up to
 * MAX_HW_RESTARTS times to load and start the bootstrap uCode.
 * Returns 0 on success (or when resuming with RF-kill asserted),
 * negative errno otherwise.
 */
static int __iwl3945_up(struct iwl_priv *priv)
{
	int rc, i;

	rc = iwl3945_alloc_bcast_station(priv);
	if (rc)
		return rc;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
		IWL_ERR(priv, "ucode not available for device bring up\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else {
		set_bit(STATUS_RF_KILL_HW, &priv->status);
		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
		return -ENODEV;
	}

	/* Ack/clear all pending interrupts before (re)initializing */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	rc = iwl3945_hw_nic_init(priv);
	if (rc) {
		IWL_ERR(priv, "Unable to int nic\n");
		return rc;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
	       priv->ucode_data.len);

	/* We return success when we resume from suspend and rf_kill is on. */
	if (test_bit(STATUS_RF_KILL_HW, &priv->status))
		return 0;

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		rc = priv->cfg->ops->lib->load_ucode(priv);

		if (rc) {
			IWL_ERR(priv,
				"Unable to set up bootstrap uCode: %d\n", rc);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		iwl3945_nic_start(priv);

		IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");

		return 0;
	}

	/* All attempts failed: force a clean teardown, temporarily
	 * setting EXIT_PENDING so __iwl3945_down() skips restarts */
	set_bit(STATUS_EXIT_PENDING, &priv->status);
	__iwl3945_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
2488
2489
2490/*****************************************************************************
2491 *
2492 * Workqueue callbacks
2493 *
2494 *****************************************************************************/
2495
2496static void iwl3945_bg_init_alive_start(struct work_struct *data)
2497{
2498 struct iwl_priv *priv =
2499 container_of(data, struct iwl_priv, init_alive_start.work);
2500
2501 mutex_lock(&priv->mutex);
2502 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2503 goto out;
2504
2505 iwl3945_init_alive_start(priv);
2506out:
2507 mutex_unlock(&priv->mutex);
2508}
2509
2510static void iwl3945_bg_alive_start(struct work_struct *data)
2511{
2512 struct iwl_priv *priv =
2513 container_of(data, struct iwl_priv, alive_start.work);
2514
2515 mutex_lock(&priv->mutex);
2516 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2517 goto out;
2518
2519 iwl3945_alive_start(priv);
2520out:
2521 mutex_unlock(&priv->mutex);
2522}
2523
2524/*
2525 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2526 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2527 * *is* readable even when device has been SW_RESET into low power mode
2528 * (e.g. during RF KILL).
2529 */
2530static void iwl3945_rfkill_poll(struct work_struct *data)
2531{
2532 struct iwl_priv *priv =
2533 container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
2534 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2535 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2536 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2537
2538 if (new_rfkill != old_rfkill) {
2539 if (new_rfkill)
2540 set_bit(STATUS_RF_KILL_HW, &priv->status);
2541 else
2542 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2543
2544 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
2545
2546 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
2547 new_rfkill ? "disable radio" : "enable radio");
2548 }
2549
2550 /* Keep this running, even if radio now enabled. This will be
2551 * cancelled in mac_start() if system decides to start again */
2552 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2553 round_jiffies_relative(2 * HZ));
2554
2555}
2556
/*
 * iwl3945_request_scan - build and send a REPLY_SCAN_CMD to the uCode
 *
 * Caller must hold priv->mutex.  Fills a (lazily allocated, reused)
 * scan command buffer with timing, SSIDs, probe request and channel
 * list, then sends it synchronously.  Returns 0 on success or a
 * negative errno; STATUS_SCAN_HW is left set only on success.
 */
int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = sizeof(struct iwl3945_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct iwl3945_scan_cmd *scan;
	u8 n_probes = 0;
	enum ieee80211_band band;
	bool is_active = false;
	int ret;
	u16 len;

	lockdep_assert_held(&priv->mutex);

	/* Allocate the scan command buffer once and keep it for reuse */
	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
			return -ENOMEM;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");

		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		/* Guard against a zero beacon interval */
		if (!interval)
			interval = suspend_time;
		/*
		 * suspend time format:
		 *  0-19: beacon interval in usec (time before exec.)
		 * 20-23: 0
		 * 24-31: number of beacons (suspend between channels)
		 */

		extra = (suspend_time / interval) << 24;
		scan_suspend_time = 0xFF0FFFFF &
			(extra | ((suspend_time % interval) * 1024));

		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}

	/* NOTE(review): priv->scan_request is dereferenced without a NULL
	 * check here -- presumably callers guarantee it is set before
	 * requesting a scan; confirm against the scan entry points */
	if (priv->scan_request->n_ssids) {
		int i, p = 0;
		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
		for (i = 0; i < priv->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!priv->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
				priv->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       priv->scan_request->ssids[i].ssid,
			       priv->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");

	/* We don't build a direct scan probe request; the uCode will do
	 * that based on the direct_mask added to each channel entry */
	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* flags + rate selection */

	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
		band = IEEE80211_BAND_2GHZ;
		break;
	case IEEE80211_BAND_5GHZ:
		/* 5 GHz: no band flag set; 6 Mbps is the lowest OFDM rate */
		scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
		band = IEEE80211_BAND_5GHZ;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scaning is requested but a certain channel
	 * is marked passive, we can do active scanning if we
	 * detect transmissions.
	 */
	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
					IWL_GOOD_CRC_TH_DISABLED;

	/* Build the probe request template directly into the command */
	len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
					vif->addr, priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(len);

	/* select Rx antennas */
	scan->flags |= iwl3945_get_antenna_flags(priv);

	/* Channel entries follow the probe request in the data area */
	scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
							    (void *)&scan->data[len], vif);
	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		return -EIO;
	}

	/* Total command length = header + probe request + channel list */
	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct iwl3945_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(STATUS_SCAN_HW, &priv->status);
	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (ret)
		clear_bit(STATUS_SCAN_HW, &priv->status);
	return ret;
}
2695
2696void iwl3945_post_scan(struct iwl_priv *priv)
2697{
2698 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2699
2700 /*
2701 * Since setting the RXON may have been deferred while
2702 * performing the scan, fire one off if needed
2703 */
2704 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2705 iwl3945_commit_rxon(priv, ctx);
2706}
2707
/*
 * iwl3945_bg_restart - restart worker queued via priv->restart
 *
 * Two paths: on firmware error, detach interfaces and let mac80211
 * drive recovery through ieee80211_restart_hw(); otherwise do a
 * direct down/up cycle.  Note the lock choreography: iwl3945_down()
 * takes priv->mutex itself, so it is called with the mutex released.
 */
static void iwl3945_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
		struct iwl_rxon_context *ctx;
		/* Detach all vifs under the mutex before tearing down */
		mutex_lock(&priv->mutex);
		for_each_context(priv, ctx)
			ctx->vif = NULL;
		priv->is_open = 0;
		mutex_unlock(&priv->mutex);
		iwl3945_down(priv);
		ieee80211_restart_hw(priv->hw);
	} else {
		iwl3945_down(priv);

		/* Re-check for exit between down and up */
		mutex_lock(&priv->mutex);
		if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
			mutex_unlock(&priv->mutex);
			return;
		}

		__iwl3945_up(priv);
		mutex_unlock(&priv->mutex);
	}
}
2737
2738static void iwl3945_bg_rx_replenish(struct work_struct *data)
2739{
2740 struct iwl_priv *priv =
2741 container_of(data, struct iwl_priv, rx_replenish);
2742
2743 mutex_lock(&priv->mutex);
2744 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2745 goto out;
2746
2747 iwl3945_rx_replenish(priv);
2748out:
2749 mutex_unlock(&priv->mutex);
2750}
2751
2752void iwl3945_post_associate(struct iwl_priv *priv)
2753{
2754 int rc = 0;
2755 struct ieee80211_conf *conf = NULL;
2756 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2757
2758 if (!ctx->vif || !priv->is_open)
2759 return;
2760
2761 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2762 ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
2763
2764 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2765 return;
2766
2767 iwl_legacy_scan_cancel_timeout(priv, 200);
2768
2769 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
2770
2771 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2772 iwl3945_commit_rxon(priv, ctx);
2773
2774 rc = iwl_legacy_send_rxon_timing(priv, ctx);
2775 if (rc)
2776 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
2777 "Attempting to continue.\n");
2778
2779 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2780
2781 ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
2782
2783 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
2784 ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
2785
2786 if (ctx->vif->bss_conf.use_short_preamble)
2787 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2788 else
2789 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2790
2791 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2792 if (ctx->vif->bss_conf.use_short_slot)
2793 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
2794 else
2795 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2796 }
2797
2798 iwl3945_commit_rxon(priv, ctx);
2799
2800 switch (ctx->vif->type) {
2801 case NL80211_IFTYPE_STATION:
2802 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
2803 break;
2804 case NL80211_IFTYPE_ADHOC:
2805 iwl3945_send_beacon_cmd(priv);
2806 break;
2807 default:
2808 IWL_ERR(priv, "%s Should not be called in %d mode\n",
2809 __func__, ctx->vif->type);
2810 break;
2811 }
2812}
2813
2814/*****************************************************************************
2815 *
2816 * mac80211 entry point functions
2817 *
2818 *****************************************************************************/
2819
2820#define UCODE_READY_TIMEOUT (2 * HZ)
2821
/*
 * iwl3945_mac_start - mac80211 .start callback
 *
 * Loads firmware from disk on first open, brings the NIC up, then
 * blocks (up to UCODE_READY_TIMEOUT) until the ALIVE handler sets
 * STATUS_READY.  Returns 0 on success or a negative errno with
 * priv->is_open left cleared.
 */
static int iwl3945_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);

	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
	 * ucode filename and max sizes are card-specific. */

	if (!priv->ucode_code.len) {
		ret = iwl3945_read_ucode(priv);
		if (ret) {
			IWL_ERR(priv, "Could not read microcode: %d\n", ret);
			mutex_unlock(&priv->mutex);
			goto out_release_irq;
		}
	}

	ret = __iwl3945_up(priv);

	mutex_unlock(&priv->mutex);

	if (ret)
		goto out_release_irq;

	IWL_DEBUG_INFO(priv, "Start UP work.\n");

	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	/* wait_event_timeout returns 0 only on timeout; re-check the
	 * READY bit to distinguish a late wakeup from a real timeout */
	ret = wait_event_timeout(priv->wait_command_queue,
			test_bit(STATUS_READY, &priv->status),
			UCODE_READY_TIMEOUT);
	if (!ret) {
		if (!test_bit(STATUS_READY, &priv->status)) {
			IWL_ERR(priv,
				"Wait for START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			ret = -ETIMEDOUT;
			goto out_release_irq;
		}
	}

	/* ucode is running and will send rfkill notifications,
	 * no need to poll the killswitch state anymore */
	cancel_delayed_work(&priv->_3945.rfkill_poll);

	priv->is_open = 1;
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;

out_release_irq:
	priv->is_open = 0;
	IWL_DEBUG_MAC80211(priv, "leave - failed\n");
	return ret;
}
2881
/*
 * iwl3945_mac_stop - mac80211 .stop callback
 *
 * Takes the NIC down, flushes pending work, and resumes manual
 * polling of the hardware RF-kill switch (the uCode that would
 * otherwise report it is no longer running).
 */
static void iwl3945_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open) {
		IWL_DEBUG_MAC80211(priv, "leave - skip\n");
		return;
	}

	priv->is_open = 0;

	iwl3945_down(priv);

	flush_workqueue(priv->workqueue);

	/* start polling the killswitch state again */
	queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
			   round_jiffies_relative(2 * HZ));

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2905
/*
 * iwl3945_mac_tx - mac80211 .tx callback
 *
 * Hands the frame to the 3945 TX path; if queueing fails the skb is
 * ours to free (mac80211 has already transferred ownership).
 */
static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (iwl3945_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2920
/*
 * iwl3945_config_ap - configure the device for AP/IBSS beaconing
 *
 * On first bring-up (not yet associated) this commits RXON
 * unassociated, sends the timing command, updates preamble/slot
 * flags, then re-commits with the ASSOC flag -- the ordering is
 * required for the timing command to be accepted.  Always finishes
 * by (re)sending the beacon command.
 */
void iwl3945_config_ap(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct ieee80211_vif *vif = ctx->vif;
	int rc = 0;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* The following should be done only at AP bring up */
	if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwl3945_commit_rxon(priv, ctx);

		/* RXON Timing */
		rc = iwl_legacy_send_rxon_timing(priv, ctx);
		if (rc)
			IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
					"Attempting to continue.\n");

		ctx->staging.assoc_id = 0;

		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |=
				RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &=
				~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* Short slot only applies on the 2.4 GHz band */
		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |=
					RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &=
					~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		iwl3945_commit_rxon(priv, ctx);
	}
	iwl3945_send_beacon_cmd(priv);
}
2966
2967static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2968 struct ieee80211_vif *vif,
2969 struct ieee80211_sta *sta,
2970 struct ieee80211_key_conf *key)
2971{
2972 struct iwl_priv *priv = hw->priv;
2973 int ret = 0;
2974 u8 sta_id = IWL_INVALID_STATION;
2975 u8 static_key;
2976
2977 IWL_DEBUG_MAC80211(priv, "enter\n");
2978
2979 if (iwl3945_mod_params.sw_crypto) {
2980 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2981 return -EOPNOTSUPP;
2982 }
2983
2984 /*
2985 * To support IBSS RSN, don't program group keys in IBSS, the
2986 * hardware will then not attempt to decrypt the frames.
2987 */
2988 if (vif->type == NL80211_IFTYPE_ADHOC &&
2989 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
2990 return -EOPNOTSUPP;
2991
2992 static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
2993
2994 if (!static_key) {
2995 sta_id = iwl_legacy_sta_id_or_broadcast(
2996 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
2997 if (sta_id == IWL_INVALID_STATION)
2998 return -EINVAL;
2999 }
3000
3001 mutex_lock(&priv->mutex);
3002 iwl_legacy_scan_cancel_timeout(priv, 100);
3003
3004 switch (cmd) {
3005 case SET_KEY:
3006 if (static_key)
3007 ret = iwl3945_set_static_key(priv, key);
3008 else
3009 ret = iwl3945_set_dynamic_key(priv, key, sta_id);
3010 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
3011 break;
3012 case DISABLE_KEY:
3013 if (static_key)
3014 ret = iwl3945_remove_static_key(priv);
3015 else
3016 ret = iwl3945_clear_sta_key_info(priv, sta_id);
3017 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
3018 break;
3019 default:
3020 ret = -EINVAL;
3021 }
3022
3023 mutex_unlock(&priv->mutex);
3024 IWL_DEBUG_MAC80211(priv, "leave\n");
3025
3026 return ret;
3027}
3028
3029static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3030 struct ieee80211_vif *vif,
3031 struct ieee80211_sta *sta)
3032{
3033 struct iwl_priv *priv = hw->priv;
3034 struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
3035 int ret;
3036 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3037 u8 sta_id;
3038
3039 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3040 sta->addr);
3041 mutex_lock(&priv->mutex);
3042 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
3043 sta->addr);
3044 sta_priv->common.sta_id = IWL_INVALID_STATION;
3045
3046
3047 ret = iwl_legacy_add_station_common(priv,
3048 &priv->contexts[IWL_RXON_CTX_BSS],
3049 sta->addr, is_ap, sta, &sta_id);
3050 if (ret) {
3051 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3052 sta->addr, ret);
3053 /* Should we return success if return code is EEXIST ? */
3054 mutex_unlock(&priv->mutex);
3055 return ret;
3056 }
3057
3058 sta_priv->common.sta_id = sta_id;
3059
3060 /* Initialize rate scaling */
3061 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3062 sta->addr);
3063 iwl3945_rs_rate_init(priv, sta, sta_id);
3064 mutex_unlock(&priv->mutex);
3065
3066 return 0;
3067}
3068
/*
 * iwl3945_configure_filter - mac80211 .configure_filter callback
 *
 * Translates mac80211 FIF_* filter flags into RXON filter bits in the
 * staging config.  The change is deliberately NOT committed here (see
 * comment below); the next RXON commit picks it up.
 */
static void iwl3945_configure_filter(struct ieee80211_hw *hw,
				     unsigned int changed_flags,
				     unsigned int *total_flags,
				     u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

/* Accumulate RXON bits to set (filter_or) or clear (filter_nand)
 * based on whether the mac80211 flag is requested */
#define CHK(test, flag)	do { \
	if (*total_flags & (test)) \
		filter_or |= (flag); \
	else \
		filter_nand |= (flag); \
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	ctx->staging.filter_flags &= ~filter_nand;
	ctx->staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but even if hw is ready, committing here breaks for some reason,
	 * we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_legacy_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
		FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
3116
3117
3118/*****************************************************************************
3119 *
3120 * sysfs attributes
3121 *
3122 *****************************************************************************/
3123
3124#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3125
3126/*
3127 * The following adds a new attribute to the sysfs representation
3128 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
3129 * used for controlling the debug level.
3130 *
3131 * See the level definitions in iwl for details.
3132 *
3133 * The debug_level being managed using sysfs below is a per device debug
3134 * level that is used instead of the global debug level if it (the per
3135 * device debug level) is set.
3136 */
/* sysfs show: print the effective debug level (per-device if set,
 * otherwise the global level) as a hex word. */
static ssize_t iwl3945_show_debug_level(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
}
3143static ssize_t iwl3945_store_debug_level(struct device *d,
3144 struct device_attribute *attr,
3145 const char *buf, size_t count)
3146{
3147 struct iwl_priv *priv = dev_get_drvdata(d);
3148 unsigned long val;
3149 int ret;
3150
3151 ret = strict_strtoul(buf, 0, &val);
3152 if (ret)
3153 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
3154 else {
3155 priv->debug_level = val;
3156 if (iwl_legacy_alloc_traffic_mem(priv))
3157 IWL_ERR(priv,
3158 "Not enough memory to generate traffic log\n");
3159 }
3160 return strnlen(buf, count);
3161}
3162
3163static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
3164 iwl3945_show_debug_level, iwl3945_store_debug_level);
3165
3166#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
3167
3168static ssize_t iwl3945_show_temperature(struct device *d,
3169 struct device_attribute *attr, char *buf)
3170{
3171 struct iwl_priv *priv = dev_get_drvdata(d);
3172
3173 if (!iwl_legacy_is_alive(priv))
3174 return -EAGAIN;
3175
3176 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
3177}
3178
3179static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
3180
3181static ssize_t iwl3945_show_tx_power(struct device *d,
3182 struct device_attribute *attr, char *buf)
3183{
3184 struct iwl_priv *priv = dev_get_drvdata(d);
3185 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
3186}
3187
3188static ssize_t iwl3945_store_tx_power(struct device *d,
3189 struct device_attribute *attr,
3190 const char *buf, size_t count)
3191{
3192 struct iwl_priv *priv = dev_get_drvdata(d);
3193 char *p = (char *)buf;
3194 u32 val;
3195
3196 val = simple_strtoul(p, &p, 10);
3197 if (p == buf)
3198 IWL_INFO(priv, ": %s is not in decimal form.\n", buf);
3199 else
3200 iwl3945_hw_reg_set_txpower(priv, val);
3201
3202 return count;
3203}
3204
3205static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
3206
3207static ssize_t iwl3945_show_flags(struct device *d,
3208 struct device_attribute *attr, char *buf)
3209{
3210 struct iwl_priv *priv = dev_get_drvdata(d);
3211 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3212
3213 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3214}
3215
/*
 * sysfs "flags" (write): stage and commit new RXON flags for the BSS
 * context.  The commit is skipped when the value is unchanged, and
 * aborted (with a warning) if a running scan cannot be cancelled,
 * since RXON must not be committed while a scan is in flight.
 */
static ssize_t iwl3945_store_flags(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	u32 flags = simple_strtoul(buf, NULL, 0);
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	mutex_lock(&priv->mutex);
	if (le32_to_cpu(ctx->staging.flags) != flags) {
		/* Cancel any currently running scans... */
		if (iwl_legacy_scan_cancel_timeout(priv, 100))
			IWL_WARN(priv, "Could not cancel scan.\n");
		else {
			IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
				       flags);
			ctx->staging.flags = cpu_to_le32(flags);
			iwl3945_commit_rxon(priv, ctx);
		}
	}
	mutex_unlock(&priv->mutex);

	/* input is always reported as consumed, even if not committed */
	return count;
}

static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
3242
3243static ssize_t iwl3945_show_filter_flags(struct device *d,
3244 struct device_attribute *attr, char *buf)
3245{
3246 struct iwl_priv *priv = dev_get_drvdata(d);
3247 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3248
3249 return sprintf(buf, "0x%04X\n",
3250 le32_to_cpu(ctx->active.filter_flags));
3251}
3252
/*
 * sysfs "filter_flags" (write): stage and commit new RXON filter flags
 * for the BSS context.  Mirrors iwl3945_store_flags(): no-op when the
 * value is unchanged, and the commit is skipped (with a warning) if a
 * running scan cannot be cancelled first.
 */
static ssize_t iwl3945_store_filter_flags(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u32 filter_flags = simple_strtoul(buf, NULL, 0);

	mutex_lock(&priv->mutex);
	if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
		/* Cancel any currently running scans... */
		if (iwl_legacy_scan_cancel_timeout(priv, 100))
			IWL_WARN(priv, "Could not cancel scan.\n");
		else {
			IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
				       "0x%04X\n", filter_flags);
			ctx->staging.filter_flags =
				cpu_to_le32(filter_flags);
			iwl3945_commit_rxon(priv, ctx);
		}
	}
	mutex_unlock(&priv->mutex);

	/* input is always reported as consumed, even if not committed */
	return count;
}

static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
			iwl3945_store_filter_flags);
3281
/*
 * sysfs "measurement" (read): if a spectrum measurement report is ready,
 * hex-dump it into the sysfs buffer and clear the ready status (the
 * report is consumed by the read).  Returns 0 when nothing is pending.
 */
static ssize_t iwl3945_show_measurement(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	struct iwl_spectrum_notification measure_report;
	u32 size = sizeof(measure_report), len = 0, ofs = 0;
	u8 *data = (u8 *)&measure_report;
	unsigned long flags;

	/* snapshot the report under the lock, then mark it consumed */
	spin_lock_irqsave(&priv->lock, flags);
	if (!(priv->measurement_status & MEASUREMENT_READY)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return 0;
	}
	memcpy(&measure_report, &priv->measure_report, size);
	priv->measurement_status = 0;
	spin_unlock_irqrestore(&priv->lock, flags);

	/* dump 16 bytes per line until the report (or PAGE_SIZE) runs out */
	while (size && (PAGE_SIZE - len)) {
		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
				   PAGE_SIZE - len, 1);
		len = strlen(buf);
		if (PAGE_SIZE - len)
			buf[len++] = '\n';

		ofs += 16;
		size -= min(size, 16U);
	}

	return len;
}
3313
3314static ssize_t iwl3945_store_measurement(struct device *d,
3315 struct device_attribute *attr,
3316 const char *buf, size_t count)
3317{
3318 struct iwl_priv *priv = dev_get_drvdata(d);
3319 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3320 struct ieee80211_measurement_params params = {
3321 .channel = le16_to_cpu(ctx->active.channel),
3322 .start_time = cpu_to_le64(priv->_3945.last_tsf),
3323 .duration = cpu_to_le16(1),
3324 };
3325 u8 type = IWL_MEASURE_BASIC;
3326 u8 buffer[32];
3327 u8 channel;
3328
3329 if (count) {
3330 char *p = buffer;
3331 strncpy(buffer, buf, min(sizeof(buffer), count));
3332 channel = simple_strtoul(p, NULL, 0);
3333 if (channel)
3334 params.channel = channel;
3335
3336 p = buffer;
3337 while (*p && *p != ' ')
3338 p++;
3339 if (*p)
3340 type = simple_strtoul(p + 1, NULL, 0);
3341 }
3342
3343 IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
3344 "channel %d (for '%s')\n", type, params.channel, buf);
3345 iwl3945_get_measurement(priv, &params, type);
3346
3347 return count;
3348}
3349
3350static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3351 iwl3945_show_measurement, iwl3945_store_measurement);
3352
3353static ssize_t iwl3945_store_retry_rate(struct device *d,
3354 struct device_attribute *attr,
3355 const char *buf, size_t count)
3356{
3357 struct iwl_priv *priv = dev_get_drvdata(d);
3358
3359 priv->retry_rate = simple_strtoul(buf, NULL, 0);
3360 if (priv->retry_rate <= 0)
3361 priv->retry_rate = 1;
3362
3363 return count;
3364}
3365
3366static ssize_t iwl3945_show_retry_rate(struct device *d,
3367 struct device_attribute *attr, char *buf)
3368{
3369 struct iwl_priv *priv = dev_get_drvdata(d);
3370 return sprintf(buf, "%d", priv->retry_rate);
3371}
3372
3373static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
3374 iwl3945_store_retry_rate);
3375
3376
/*
 * sysfs "channels" (read-only): intentionally a stub that returns no
 * data.  The attribute is kept only so the sysfs file continues to
 * exist; channel information does not belong in sysfs and should be
 * obtained through the wireless stack interfaces instead.
 */
static ssize_t iwl3945_show_channels(struct device *d,
			struct device_attribute *attr, char *buf)
{
	return 0;
}

static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
3385
3386static ssize_t iwl3945_show_antenna(struct device *d,
3387 struct device_attribute *attr, char *buf)
3388{
3389 struct iwl_priv *priv = dev_get_drvdata(d);
3390
3391 if (!iwl_legacy_is_alive(priv))
3392 return -EAGAIN;
3393
3394 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
3395}
3396
3397static ssize_t iwl3945_store_antenna(struct device *d,
3398 struct device_attribute *attr,
3399 const char *buf, size_t count)
3400{
3401 struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
3402 int ant;
3403
3404 if (count == 0)
3405 return 0;
3406
3407 if (sscanf(buf, "%1i", &ant) != 1) {
3408 IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
3409 return count;
3410 }
3411
3412 if ((ant >= 0) && (ant <= 2)) {
3413 IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
3414 iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
3415 } else
3416 IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
3417
3418
3419 return count;
3420}
3421
3422static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
3423
3424static ssize_t iwl3945_show_status(struct device *d,
3425 struct device_attribute *attr, char *buf)
3426{
3427 struct iwl_priv *priv = dev_get_drvdata(d);
3428 if (!iwl_legacy_is_alive(priv))
3429 return -EAGAIN;
3430 return sprintf(buf, "0x%08x\n", (int)priv->status);
3431}
3432
3433static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
3434
3435static ssize_t iwl3945_dump_error_log(struct device *d,
3436 struct device_attribute *attr,
3437 const char *buf, size_t count)
3438{
3439 struct iwl_priv *priv = dev_get_drvdata(d);
3440 char *p = (char *)buf;
3441
3442 if (p[0] == '1')
3443 iwl3945_dump_nic_error_log(priv);
3444
3445 return strnlen(buf, count);
3446}
3447
3448static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
3449
3450/*****************************************************************************
3451 *
3452 * driver setup and tear down
3453 *
3454 *****************************************************************************/
3455
/*
 * Allocate the driver workqueue and initialize all deferred work items,
 * the watchdog timer and the interrupt tasklet.
 *
 * NOTE(review): the create_singlethread_workqueue() return value is not
 * checked; if the allocation fails, later queue_*work() calls will
 * dereference a NULL workqueue -- confirm whether probe should fail here.
 */
static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
{
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->restart, iwl3945_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
	INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
	INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
	INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);

	iwl_legacy_setup_scan_deferred_work(priv);

	iwl3945_hw_setup_deferred_work(priv);

	/* driver watchdog timer; handler runs iwl_legacy_bg_watchdog() */
	init_timer(&priv->watchdog);
	priv->watchdog.data = (unsigned long)priv;
	priv->watchdog.function = iwl_legacy_bg_watchdog;

	/* interrupt bottom half runs in tasklet context */
	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl3945_irq_tasklet, (unsigned long)priv);
}
3479
/*
 * Cancel the deferred work scheduled by iwl3945_setup_deferred_work().
 * init_alive_start is cancelled synchronously; alive_start is only
 * cancelled if pending (not waited for).
 */
static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
{
	iwl3945_hw_cancel_deferred_work(priv);

	cancel_delayed_work_sync(&priv->init_alive_start);
	cancel_delayed_work(&priv->alive_start);

	iwl_legacy_cancel_scan_deferred_work(priv);
}
3489
/* All sysfs attributes exported by this driver, registered as one group
 * in iwl3945_pci_probe() and removed in iwl3945_pci_remove(). */
static struct attribute *iwl3945_sysfs_entries[] = {
	&dev_attr_antenna.attr,
	&dev_attr_channels.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_flags.attr,
	&dev_attr_filter_flags.attr,
	&dev_attr_measurement.attr,
	&dev_attr_retry_rate.attr,
	&dev_attr_status.attr,
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL	/* sentinel */
};

static struct attribute_group iwl3945_attribute_group = {
	.name = NULL, /* put in device directory */
	.attrs = iwl3945_sysfs_entries,
};
3511
/*
 * mac80211 callbacks for the 3945.  Deliberately not const: probe
 * clears .hw_scan at runtime when the disable_hw_scan module parameter
 * is set.
 */
struct ieee80211_ops iwl3945_hw_ops = {
	.tx = iwl3945_mac_tx,
	.start = iwl3945_mac_start,
	.stop = iwl3945_mac_stop,
	.add_interface = iwl_legacy_mac_add_interface,
	.remove_interface = iwl_legacy_mac_remove_interface,
	.change_interface = iwl_legacy_mac_change_interface,
	.config = iwl_legacy_mac_config,
	.configure_filter = iwl3945_configure_filter,
	.set_key = iwl3945_mac_set_key,
	.conf_tx = iwl_legacy_mac_conf_tx,
	.reset_tsf = iwl_legacy_mac_reset_tsf,
	.bss_info_changed = iwl_legacy_mac_bss_info_changed,
	.hw_scan = iwl_legacy_mac_hw_scan,
	.sta_add = iwl3945_mac_sta_add,
	.sta_remove = iwl_legacy_mac_sta_remove,
	.tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
};
3530
/*
 * Initialize driver-private state: locks, defaults, channel map, EEPROM
 * TX power tables and geo (band/rate) data.
 *
 * Returns 0 on success or a negative errno; on failure the channel map
 * is freed again if it had been allocated.
 */
static int iwl3945_init_drv(struct iwl_priv *priv)
{
	int ret;
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;

	priv->retry_rate = 1;
	priv->beacon_skb = NULL;

	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);

	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* refuse to drive hardware with an EEPROM layout we don't know */
	if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
		IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
			 eeprom->version);
		ret = -EINVAL;
		goto err;
	}
	ret = iwl_legacy_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	/* Set up txpower settings in driver for all channels */
	if (iwl3945_txpower_set_from_eeprom(priv)) {
		ret = -EIO;
		goto err_free_channel_map;
	}

	ret = iwl_legacy_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	iwl3945_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_legacy_free_channel_map(priv);
err:
	return ret;
}
3588
/* Maximum size (bytes) of a probe request frame we will build. */
#define IWL3945_MAX_PROBE_REQUEST 200

/*
 * Describe the device's capabilities to mac80211 and register the hw.
 * Sets priv->mac80211_registered on success so remove() knows whether
 * to unregister.  Returns 0 or the ieee80211_register_hw() error.
 */
static int iwl3945_setup_mac(struct iwl_priv *priv)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;

	hw->rate_control_algorithm = "iwl-3945-rs";
	hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_SPECTRUM_MGMT;

	hw->wiphy->interface_modes =
		priv->contexts[IWL_RXON_CTX_BSS].interface_modes;

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
			    WIPHY_FLAG_IBSS_RSN;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	/* only advertise bands that actually have channels */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];

	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	iwl_legacy_leds_init(priv);

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
3637
/*
 * PCI probe: bring up one 3945 device.
 *
 * Performs, in order: mac80211/driver allocation, PCI/DMA setup, BAR
 * mapping, EEPROM read, hardware parameter setup, driver-state init,
 * IRQ/sysfs registration, and finally mac80211 registration.  Each step
 * that can fail unwinds all earlier steps via the goto chain at the
 * bottom, in reverse order of acquisition.
 */
static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0, i;
	struct iwl_priv *priv;
	struct ieee80211_hw *hw;
	struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
	struct iwl3945_eeprom *eeprom;
	unsigned long flags;

	/***********************
	 * 1. Allocating HW data
	 * ********************/

	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */
	hw = iwl_legacy_alloc_all(cfg);
	if (hw == NULL) {
		pr_err("Can not allocate network device\n");
		err = -ENOMEM;
		goto out;
	}
	priv = hw->priv;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	priv->cmd_queue = IWL39_CMD_QUEUE_NUM;

	/* 3945 has only one valid context */
	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);

	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
		priv->contexts[i].ctxid = i;

	/* command IDs and defaults for the single BSS context */
	priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
	priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
	priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
	priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
	priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC);
	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;

	/*
	 * Disabling hardware scan means that mac80211 will perform scans
	 * "the hard way", rather than using device's scan.
	 */
	if (iwl3945_mod_params.disable_hw_scan) {
		IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
		iwl3945_hw_ops.hw_scan = NULL;
	}

	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
	priv->cfg = cfg;
	priv->pci_dev = pdev;
	priv->inta_mask = CSR_INI_SET_MASK;

	/* traffic log is optional; a failure here is not fatal */
	if (iwl_legacy_alloc_traffic_mem(priv))
		IWL_ERR(priv, "Not enough memory to generate traffic log\n");

	/***************************
	 * 2. Initializing PCI bus
	 * *************************/
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	/* device only does 32-bit DMA */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		IWL_WARN(priv, "No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);
	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/***********************
	 * 3. Read REV Register
	 * ********************/
	priv->hw_base = pci_iomap(pdev, 0, 0);
	if (!priv->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
			(unsigned long long) pci_resource_len(pdev, 0));
	IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, 0x41, 0x00);

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&priv->reg_lock);
	spin_lock_init(&priv->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/***********************
	 * 4. Read EEPROM
	 * ********************/

	/* Read the EEPROM */
	err = iwl_legacy_eeprom_init(priv);
	if (err) {
		IWL_ERR(priv, "Unable to init EEPROM\n");
		goto out_iounmap;
	}
	/* MAC Address location in EEPROM same for 3945/4965 */
	eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
	SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);

	/***********************
	 * 5. Setup HW Constants
	 * ********************/
	/* Device-specific setup */
	if (iwl3945_hw_set_hw_params(priv)) {
		IWL_ERR(priv, "failed to set hw settings\n");
		goto out_eeprom_free;
	}

	/***********************
	 * 6. Setup priv
	 * ********************/

	err = iwl3945_init_drv(priv);
	if (err) {
		IWL_ERR(priv, "initializing driver failed\n");
		goto out_unset_hw_params;
	}

	IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
		priv->cfg->name);

	/***********************
	 * 7. Setup Services
	 * ********************/

	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	pci_enable_msi(priv->pci_dev);

	err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
			  IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
		goto out_disable_msi;
	}

	err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
	if (err) {
		IWL_ERR(priv, "failed to create sysfs device attributes\n");
		goto out_release_irq;
	}

	iwl_legacy_set_rxon_channel(priv,
			     &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
			     &priv->contexts[IWL_RXON_CTX_BSS]);
	iwl3945_setup_deferred_work(priv);
	iwl3945_setup_rx_handlers(priv);
	iwl_legacy_power_initialize(priv);

	/*********************************
	 * 8. Setup and Register mac80211
	 * *******************************/

	iwl_legacy_enable_interrupts(priv);

	err = iwl3945_setup_mac(priv);
	if (err)
		goto out_remove_sysfs;

	/* debugfs registration failure is logged but not fatal */
	err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
	if (err)
		IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);

	/* Start monitoring the killswitch */
	queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
			   2 * HZ);

	return 0;

	/* error unwinding: each label undoes the steps above it */
 out_remove_sysfs:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
 out_release_irq:
	free_irq(priv->pci_dev->irq, priv);
 out_disable_msi:
	pci_disable_msi(priv->pci_dev);
	iwl_legacy_free_geos(priv);
	iwl_legacy_free_channel_map(priv);
 out_unset_hw_params:
	iwl3945_unset_hw_params(priv);
 out_eeprom_free:
	iwl_legacy_eeprom_free(priv);
 out_iounmap:
	pci_iounmap(pdev, priv->hw_base);
 out_pci_release_regions:
	pci_release_regions(pdev);
 out_pci_disable_device:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
 out_ieee80211_free_hw:
	iwl_legacy_free_traffic_mem(priv);
	ieee80211_free_hw(priv->hw);
 out:
	return err;
}
3870
/*
 * PCI remove: tear down one 3945 device in reverse order of probe.
 * The ordering here is load-bearing: mac80211 is unregistered (which
 * stops the interface) before interrupts are disabled and the
 * workqueue destroyed, and the workqueue is only destroyed last
 * because ieee80211_unregister_hw() flushes work onto it.
 */
static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!priv)
		return;

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwl_legacy_dbgfs_unregister(priv);

	/* tell all pending work that we are going away */
	set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_legacy_leds_exit(priv);

	if (priv->mac80211_registered) {
		ieee80211_unregister_hw(priv->hw);
		priv->mac80211_registered = 0;
	} else {
		iwl3945_down(priv);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with iwl_down(), but there are paths to
	 * run iwl_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running iwl_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	iwl_legacy_apm_stop(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl3945_synchronize_irq(priv);

	sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);

	cancel_delayed_work_sync(&priv->_3945.rfkill_poll);

	iwl3945_dealloc_ucode_pci(priv);

	/* free RX/TX DMA rings */
	if (priv->rxq.bd)
		iwl3945_rx_queue_free(priv, &priv->rxq);
	iwl3945_hw_txq_ctx_free(priv);

	iwl3945_unset_hw_params(priv);

	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_legacy_free_traffic_mem(priv);

	free_irq(pdev->irq, priv);
	pci_disable_msi(pdev);

	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	iwl_legacy_free_channel_map(priv);
	iwl_legacy_free_geos(priv);
	kfree(priv->scan_cmd);
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	ieee80211_free_hw(priv->hw);
}
3950
3951
3952/*****************************************************************************
3953 *
3954 * driver and module entry point
3955 *
3956 *****************************************************************************/
3957
/* PCI driver descriptor; registered from iwl3945_init(). */
static struct pci_driver iwl3945_driver = {
	.name = DRV_NAME,
	.id_table = iwl3945_hw_card_ids,
	.probe = iwl3945_pci_probe,
	.remove = __devexit_p(iwl3945_pci_remove),
	.driver.pm = IWL_LEGACY_PM_OPS,
};
3965
3966static int __init iwl3945_init(void)
3967{
3968
3969 int ret;
3970 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3971 pr_info(DRV_COPYRIGHT "\n");
3972
3973 ret = iwl3945_rate_control_register();
3974 if (ret) {
3975 pr_err("Unable to register rate control algorithm: %d\n", ret);
3976 return ret;
3977 }
3978
3979 ret = pci_register_driver(&iwl3945_driver);
3980 if (ret) {
3981 pr_err("Unable to initialize PCI module\n");
3982 goto error_register;
3983 }
3984
3985 return ret;
3986
3987error_register:
3988 iwl3945_rate_control_unregister();
3989 return ret;
3990}
3991
/* Module exit: unregister in reverse order of iwl3945_init(). */
static void __exit iwl3945_exit(void)
{
	pci_unregister_driver(&iwl3945_driver);
	iwl3945_rate_control_unregister();
}
3997
/* Firmware image this driver may request at runtime. */
MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));

/* Module parameters, all backed by iwl3945_mod_params and read-only
 * from userspace except the debug mask. */
module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto,
		"using software crypto (default 1 [software])");
module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
		int, S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");

module_exit(iwl3945_exit);
module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
deleted file mode 100644
index d2fba9eae153..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ /dev/null
@@ -1,3281 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/mac80211.h>
48
49#include <asm/div64.h>
50
51#define DRV_NAME "iwl4965"
52
53#include "iwl-eeprom.h"
54#include "iwl-dev.h"
55#include "iwl-core.h"
56#include "iwl-io.h"
57#include "iwl-helpers.h"
58#include "iwl-sta.h"
59#include "iwl-4965-calib.h"
60#include "iwl-4965.h"
61#include "iwl-4965-led.h"
62
63
64/******************************************************************************
65 *
66 * module boiler plate
67 *
68 ******************************************************************************/
69
70/*
71 * module name, copyright, version, etc.
72 */
#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi 4965 driver for Linux"

/* Debug builds append a "d" suffix to the driver version string. */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
#define VD "d"
#else
#define VD
#endif

#define DRV_VERSION     IWLWIFI_VERSION VD


MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");
89
90void iwl4965_update_chain_flags(struct iwl_priv *priv)
91{
92 struct iwl_rxon_context *ctx;
93
94 if (priv->cfg->ops->hcmd->set_rxon_chain) {
95 for_each_context(priv, ctx) {
96 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
97 if (ctx->active.rx_chain != ctx->staging.rx_chain)
98 iwl_legacy_commit_rxon(priv, ctx);
99 }
100 }
101}
102
103static void iwl4965_clear_free_frames(struct iwl_priv *priv)
104{
105 struct list_head *element;
106
107 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
108 priv->frames_count);
109
110 while (!list_empty(&priv->free_frames)) {
111 element = priv->free_frames.next;
112 list_del(element);
113 kfree(list_entry(element, struct iwl_frame, list));
114 priv->frames_count--;
115 }
116
117 if (priv->frames_count) {
118 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
119 priv->frames_count);
120 priv->frames_count = 0;
121 }
122}
123
124static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
125{
126 struct iwl_frame *frame;
127 struct list_head *element;
128 if (list_empty(&priv->free_frames)) {
129 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
130 if (!frame) {
131 IWL_ERR(priv, "Could not allocate frame!\n");
132 return NULL;
133 }
134
135 priv->frames_count++;
136 return frame;
137 }
138
139 element = priv->free_frames.next;
140 list_del(element);
141 return list_entry(element, struct iwl_frame, list);
142}
143
/* Return a frame to the free pool, scrubbing its contents first so stale
 * command data cannot leak into the next user. */
static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}
149
150static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
151 struct ieee80211_hdr *hdr,
152 int left)
153{
154 lockdep_assert_held(&priv->mutex);
155
156 if (!priv->beacon_skb)
157 return 0;
158
159 if (priv->beacon_skb->len > left)
160 return 0;
161
162 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
163
164 return priv->beacon_skb->len;
165}
166
/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size.
 * On success tim_idx/tim_size in @tx_beacon_cmd are filled in; if no TIM
 * element is present only a warning is logged and both stay zero. */
static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
			struct iwl_tx_beacon_cmd *tx_beacon_cmd,
			u8 *beacon, u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM.
	 * The (frame_size - 2) bound guarantees beacon[tim_idx+1] (the IE
	 * length byte) is still inside the frame before it is read. */
	while ((tim_idx < (frame_size - 2)) &&
			(beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
	} else
		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
}
193
/*
 * Build a complete REPLY_TX_BEACON command inside @frame: TX command
 * fields, TIM information, rate/antenna selection, and the beacon frame
 * body itself. Returns the total command size in bytes, or 0 on any
 * failure (no beacon context, no/oversized beacon template).
 */
static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
				       struct iwl_frame *frame)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents; the template is copied directly after
	 * the command header, limited to the space left in the union. */
	frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
			frame_size);

	/* Set up packet rate and flags; management antenna alternates on
	 * each transmission among the valid TX antennas. */
	rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
	priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->hw_params.valid_tx_ant);
	rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
			rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
248
249int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
250{
251 struct iwl_frame *frame;
252 unsigned int frame_size;
253 int rc;
254
255 frame = iwl4965_get_free_frame(priv);
256 if (!frame) {
257 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
258 "command.\n");
259 return -ENOMEM;
260 }
261
262 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
263 if (!frame_size) {
264 IWL_ERR(priv, "Error configuring the beacon command\n");
265 iwl4965_free_frame(priv, frame);
266 return -EINVAL;
267 }
268
269 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
270 &frame->u.cmd[0]);
271
272 iwl4965_free_frame(priv, frame);
273
274 return rc;
275}
276
/* Extract the DMA address of TX buffer @idx from a TFD. The low 32 bits
 * live in tb->lo; on platforms with a 64-bit dma_addr_t the 4 extra high
 * bits are kept in the low nibble of hi_n_len. */
static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		/* Two 16-bit shifts instead of one 32-bit shift: shifting a
		 * 32-bit dma_addr_t by 32 would be undefined behavior. */
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}
288
/* Extract the byte length of TX buffer @idx: the upper 12 bits of the
 * hi_n_len field (the low 4 bits hold address bits, see tb_get_addr). */
static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
295
/* Fill TX-buffer slot @idx of a TFD with @addr/@len and bump num_tbs.
 * Packing: hi_n_len = len in bits 4..15, address bits 32..35 in bits 0..3. */
static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		/* Double 16-bit shift avoids UB on 32-bit dma_addr_t. */
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	/* Caller fills slots in order, so idx + 1 is the new TB count. */
	tfd->num_tbs = idx + 1;
}
310
/* Number of TX buffers used in this TFD (low 5 bits of num_tbs). */
static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
315
/**
 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Unmaps the command buffer (TB 0, mapped bidirectional) and all data
 * chunks (TBs 1..n-1, mapped to-device), then frees the associated skb.
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd: the first TB is the command, whose mapping info is
	 * kept in txq->meta rather than recovered from the TFD. */
	if (num_tbs)
		pci_unmap_single(dev,
				dma_unmap_addr(&txq->meta[index], mapping),
				dma_unmap_len(&txq->meta[index], len),
				PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
				iwl4965_tfd_tb_get_len(tfd, i),
				PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}
370
/*
 * Append one DMA buffer (@addr/@len) to the TFD at the queue's write
 * pointer. @reset clears the TFD first (used for the first buffer of a
 * frame); @pad is accepted for interface compatibility but unused here.
 * Returns 0 on success, -EINVAL when the TFD is already full.
 */
int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset, u8 pad)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct iwl_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of IWL_NUM_OF_TBS Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* Hardware addresses 36 bits of DMA space; misaligned addresses are
	 * only reported, not rejected. */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
405
/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 *
 * Always returns 0; the return type exists for ops-table compatibility.
 */
int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
			 struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address;
	 * the register holds the address shifted right by 8 bits. */
	iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			txq->q.dma_addr >> 8);

	return 0;
}
424
425/******************************************************************************
426 *
427 * Generic RX handler implementations
428 *
429 ******************************************************************************/
/*
 * Handle the uCode ALIVE notification. Depending on the subtype, copy
 * the response into the init- or runtime-alive record (note the two
 * memcpy sizes differ: init_alive_resp is the larger structure) and
 * schedule the matching alive_start work.
 */
static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
		memcpy(&priv->card_alive_init,
		       &pkt->u.alive_frame,
		       sizeof(struct iwl_init_alive_resp));
		pwork = &priv->init_alive_start;
	} else {
		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
		memcpy(&priv->card_alive, &pkt->u.alive_frame,
		       sizeof(struct iwl_alive_resp));
		pwork = &priv->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(priv->workqueue, pwork,
				   msecs_to_jiffies(5));
	else
		IWL_WARN(priv, "uCode did not respond OK.\n");
}
465
466/**
467 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
468 *
469 * This callback is provided in order to send a statistics request.
470 *
471 * This timer function is continually reset to execute within
472 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
473 * was received. We need to ensure we receive the statistics in order
474 * to update the temperature used for calibrating the TXPOWER.
475 */
476static void iwl4965_bg_statistics_periodic(unsigned long data)
477{
478 struct iwl_priv *priv = (struct iwl_priv *)data;
479
480 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
481 return;
482
483 /* dont send host command if rf-kill is on */
484 if (!iwl_legacy_is_ready_rf(priv))
485 return;
486
487 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
488}
489
/*
 * Handle a BEACON_NOTIFICATION from the uCode: record the IBSS manager
 * status. The detailed per-beacon debug print only exists in debug
 * builds (the rate variable would be unused otherwise).
 */
static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
					struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl4965_beacon_notif *beacon =
		(struct iwl4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
510
/*
 * React to a critical-temperature (CT) kill: stop all mac80211 queues
 * and put the device to sleep until the temperature recovers.
 */
static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
{
	unsigned long flags;

	IWL_DEBUG_POWER(priv, "Stop all queues\n");

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	/* Read back to flush the posted write before proceeding. */
	iwl_read32(priv, CSR_UCODE_DRV_GP1);

	spin_lock_irqsave(&priv->reg_lock, flags);
	/* Grab-then-release cycles NIC access under reg_lock; presumably
	 * this lets the device drop into low power — confirm against
	 * iwl_grab_nic_access() semantics. */
	if (!iwl_grab_nic_access(priv))
		iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
529
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
					struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of the old status bits, compared at the end to detect
	 * whether the HW rfkill state actually changed. */
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* Block further host commands while the card is disabled. */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		iwl4965_perform_ct_kill_task(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	if (!(flags & RXON_CARD_DISABLED))
		iwl_legacy_scan_cancel(priv);

	/* Only notify the rfkill subsystem on a real transition; otherwise
	 * wake anyone blocked waiting for a command to proceed. */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up(&priv->wait_command_queue);
}
580
/**
 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
	priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
	priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
	priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
			iwl_legacy_rx_spectrum_measure_notif;
	priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
	priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
	    iwl_legacy_rx_pm_debug_statistics_notif;
	priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * statistics request from the host as well as for the periodic
	 * statistics notifications (after received beacons) from the uCode.
	 */
	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
	priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;

	/* Scan-related handlers live in shared legacy code. */
	iwl_legacy_setup_rx_scan_handlers(priv);

	/* status change handler */
	priv->rx_handlers[CARD_STATE_NOTIFICATION] =
	    iwl4965_rx_card_state_notif;

	priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
	    iwl4965_rx_missed_beacon_notif;
	/* Rx handlers */
	priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
	priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
	/* block ack */
	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
	/* Set up hardware specific Rx handlers */
	priv->cfg->ops->lib->rx_handler_setup(priv);
}
626
/**
 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 *
 * Walks the RX ring from the driver's read index up to the uCode's
 * closed_rb_num, dispatching each packet, recycling (or retiring) its
 * page, and periodically replenishing the ring while still iterating
 * so the uCode never runs out of buffers.
 */
void iwl4965_rx_handle(struct iwl_priv *priv)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_rx_queue *rxq = &priv->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	/* Starts at 8 so the first in-loop replenish (if fill_rx) happens
	 * immediately; thereafter every 8 processed buffers. */
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		/* Give the page back to the CPU before reading it. */
		pci_unmap_page(priv->pci_dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_legacy_dev_rx(priv, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
		 *   rx_handlers table.  See iwl4965_setup_rx_handlers() */
		if (priv->rx_handlers[pkt->hdr.cmd]) {
			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
				i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
		} else {
			/* No handling needed */
			IWL_DEBUG_RX(priv,
				"r %d i %d No handler needed for %s, 0x%02x\n",
				r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_legacy_tx_cmd_complete(priv, rxb);
			else
				IWL_WARN(priv, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
				0, PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode wont assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl4965_rx_replenish_now(priv);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl4965_rx_replenish_now(priv);
	else
		iwl4965_rx_queue_restock(priv);
}
767
/* call this function to flush any scheduled tasklet: wait for an
 * in-flight hard IRQ to finish, then kill the tasklet it may have
 * scheduled. Order matters — synchronize first so no new tasklet is
 * queued after the kill. */
static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(priv->pci_dev->irq);
	tasklet_kill(&priv->irq_tasklet);
}
775
/*
 * Bottom-half interrupt servicing. Reads and acks both the uCode (CSR_INT)
 * and flow-handler (CSR_FH_INT_STATUS) interrupt sources, then services
 * each discovered condition in turn: HW error (aborts early without
 * re-enabling interrupts — error recovery takes over), rfkill toggles,
 * CT kill, SW error, wakeup, RX, and firmware-load TX completion.
 */
static void iwl4965_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_legacy_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_legacy_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Interrupts stay disabled; the error handler restarts us. */
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		priv->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
		priv->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_legacy_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
		for (i = 0; i < priv->hw_params.max_txq_num; i++)
			iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
		priv->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		iwl4965_rx_handle(priv);
		priv->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
		priv->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		priv->ucode_write_complete = 1;
		wake_up(&priv->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~(priv->inta_mask)) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
		IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_legacy_enable_rfkill_int(priv);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv,
			"End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
961
962/*****************************************************************************
963 *
964 * sysfs attributes
965 *
966 *****************************************************************************/
967
968#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
969
970/*
971 * The following adds a new attribute to the sysfs representation
972 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
973 * used for controlling the debug level.
974 *
975 * See the level definitions in iwl for details.
976 *
977 * The debug_level being managed using sysfs below is a per device debug
978 * level that is used instead of the global debug level if it (the per
979 * device debug level) is set.
980 */
/* sysfs "debug_level" show: print the effective (per-device or global)
 * debug mask in hex. */
static ssize_t iwl4965_show_debug_level(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
}
987static ssize_t iwl4965_store_debug_level(struct device *d,
988 struct device_attribute *attr,
989 const char *buf, size_t count)
990{
991 struct iwl_priv *priv = dev_get_drvdata(d);
992 unsigned long val;
993 int ret;
994
995 ret = strict_strtoul(buf, 0, &val);
996 if (ret)
997 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
998 else {
999 priv->debug_level = val;
1000 if (iwl_legacy_alloc_traffic_mem(priv))
1001 IWL_ERR(priv,
1002 "Not enough memory to generate traffic log\n");
1003 }
1004 return strnlen(buf, count);
1005}
1006
1007static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
1008 iwl4965_show_debug_level, iwl4965_store_debug_level);
1009
1010
1011#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1012
1013
1014static ssize_t iwl4965_show_temperature(struct device *d,
1015 struct device_attribute *attr, char *buf)
1016{
1017 struct iwl_priv *priv = dev_get_drvdata(d);
1018
1019 if (!iwl_legacy_is_alive(priv))
1020 return -EAGAIN;
1021
1022 return sprintf(buf, "%d\n", priv->temperature);
1023}
1024
1025static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
1026
1027static ssize_t iwl4965_show_tx_power(struct device *d,
1028 struct device_attribute *attr, char *buf)
1029{
1030 struct iwl_priv *priv = dev_get_drvdata(d);
1031
1032 if (!iwl_legacy_is_ready_rf(priv))
1033 return sprintf(buf, "off\n");
1034 else
1035 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
1036}
1037
1038static ssize_t iwl4965_store_tx_power(struct device *d,
1039 struct device_attribute *attr,
1040 const char *buf, size_t count)
1041{
1042 struct iwl_priv *priv = dev_get_drvdata(d);
1043 unsigned long val;
1044 int ret;
1045
1046 ret = strict_strtoul(buf, 10, &val);
1047 if (ret)
1048 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
1049 else {
1050 ret = iwl_legacy_set_tx_power(priv, val, false);
1051 if (ret)
1052 IWL_ERR(priv, "failed setting tx power (0x%d).\n",
1053 ret);
1054 else
1055 ret = count;
1056 }
1057 return ret;
1058}
1059
/* /sys/.../tx_power: root-writable (decimal dBm), world-readable. */
static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
			iwl4965_show_tx_power, iwl4965_store_tx_power);
1062
/*
 * Attributes registered on the PCI device in iwl4965_ucode_callback()
 * (and removed again on driver unbind).  debug_level only exists when
 * the debug build option is enabled.
 */
static struct attribute *iwl_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};

static struct attribute_group iwl_attribute_group = {
	.name = NULL, /* put in device directory */
	.attrs = iwl_sysfs_entries,
};
1076
1077/******************************************************************************
1078 *
1079 * uCode download functions
1080 *
1081 ******************************************************************************/
1082
/*
 * Free all six DMA-coherent uCode image buffers allocated during
 * firmware load.  iwl_legacy_free_fw_desc() tolerates descriptors that
 * were never allocated, so this is safe on partial-allocation error paths
 * (see err_pci_alloc in iwl4965_ucode_callback()).
 */
static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
{
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
}
1092
/* Release the on-board processor from reset so it starts executing. */
static void iwl4965_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}
1098
1099static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
1100 void *context);
1101static int iwl4965_mac_setup_register(struct iwl_priv *priv,
1102 u32 max_probe_length);
1103
1104static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
1105{
1106 const char *name_pre = priv->cfg->fw_name_pre;
1107 char tag[8];
1108
1109 if (first) {
1110 priv->fw_index = priv->cfg->ucode_api_max;
1111 sprintf(tag, "%d", priv->fw_index);
1112 } else {
1113 priv->fw_index--;
1114 sprintf(tag, "%d", priv->fw_index);
1115 }
1116
1117 if (priv->fw_index < priv->cfg->ucode_api_min) {
1118 IWL_ERR(priv, "no suitable firmware found!\n");
1119 return -ENOENT;
1120 }
1121
1122 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1123
1124 IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
1125 priv->firmware_name);
1126
1127 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
1128 &priv->pci_dev->dev, GFP_KERNEL, priv,
1129 iwl4965_ucode_callback);
1130}
1131
/*
 * Pointers into (and byte sizes of) the five images packed back-to-back
 * in a raw .ucode file: runtime instructions/data, init instructions/data
 * and the bootstrap image.  Pointers reference ucode_raw->data directly,
 * so they are only valid until release_firmware().
 */
struct iwl4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
1136
/*
 * Parse the raw firmware file header and fill @pieces with pointers
 * to the individual images inside @ucode_raw.
 *
 * Returns 0 on success, -EINVAL if the file is shorter than its header
 * or its total size disagrees with the sizes claimed in the header.
 */
static int iwl4965_load_firmware(struct iwl_priv *priv,
				 const struct firmware *ucode_raw,
				 struct iwl4965_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	priv->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IWL_UCODE_API(priv->ucode_ver);

	/* All API versions this legacy driver handles use the v1 header. */
	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(priv, "File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size =
			le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	/*
	 * NOTE(review): the five header sizes are summed without an
	 * overflow check; a crafted file could wrap this sum on 32-bit
	 * size_t.  Per-image bounds are re-checked against SRAM limits by
	 * the caller — confirm that is sufficient before relying on it.
	 */
	if (ucode_raw->size != hdr_size + pieces->inst_size +
			       pieces->data_size + pieces->init_size +
			       pieces->init_data_size + pieces->boot_size) {

		IWL_ERR(priv,
			"uCode file size %d does not match expected size\n",
			(int)ucode_raw->size);
		return -EINVAL;
	}

	/* Images follow the header back-to-back in this fixed order. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
1192
1193/**
1194 * iwl4965_ucode_callback - callback when firmware was loaded
1195 *
1196 * If loaded successfully, copies the firmware into buffers
1197 * for the card to fetch (via DMA).
1198 */
1199static void
1200iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
1201{
1202 struct iwl_priv *priv = context;
1203 struct iwl_ucode_header *ucode;
1204 int err;
1205 struct iwl4965_firmware_pieces pieces;
1206 const unsigned int api_max = priv->cfg->ucode_api_max;
1207 const unsigned int api_min = priv->cfg->ucode_api_min;
1208 u32 api_ver;
1209
1210 u32 max_probe_length = 200;
1211 u32 standard_phy_calibration_size =
1212 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1213
1214 memset(&pieces, 0, sizeof(pieces));
1215
1216 if (!ucode_raw) {
1217 if (priv->fw_index <= priv->cfg->ucode_api_max)
1218 IWL_ERR(priv,
1219 "request for firmware file '%s' failed.\n",
1220 priv->firmware_name);
1221 goto try_again;
1222 }
1223
1224 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
1225 priv->firmware_name, ucode_raw->size);
1226
1227 /* Make sure that we got at least the API version number */
1228 if (ucode_raw->size < 4) {
1229 IWL_ERR(priv, "File size way too small!\n");
1230 goto try_again;
1231 }
1232
1233 /* Data from ucode file: header followed by uCode images */
1234 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1235
1236 err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
1237
1238 if (err)
1239 goto try_again;
1240
1241 api_ver = IWL_UCODE_API(priv->ucode_ver);
1242
1243 /*
1244 * api_ver should match the api version forming part of the
1245 * firmware filename ... but we don't check for that and only rely
1246 * on the API version read from firmware header from here on forward
1247 */
1248 if (api_ver < api_min || api_ver > api_max) {
1249 IWL_ERR(priv,
1250 "Driver unable to support your firmware API. "
1251 "Driver supports v%u, firmware is v%u.\n",
1252 api_max, api_ver);
1253 goto try_again;
1254 }
1255
1256 if (api_ver != api_max)
1257 IWL_ERR(priv,
1258 "Firmware has old API version. Expected v%u, "
1259 "got v%u. New firmware can be obtained "
1260 "from http://www.intellinuxwireless.org.\n",
1261 api_max, api_ver);
1262
1263 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1264 IWL_UCODE_MAJOR(priv->ucode_ver),
1265 IWL_UCODE_MINOR(priv->ucode_ver),
1266 IWL_UCODE_API(priv->ucode_ver),
1267 IWL_UCODE_SERIAL(priv->ucode_ver));
1268
1269 snprintf(priv->hw->wiphy->fw_version,
1270 sizeof(priv->hw->wiphy->fw_version),
1271 "%u.%u.%u.%u",
1272 IWL_UCODE_MAJOR(priv->ucode_ver),
1273 IWL_UCODE_MINOR(priv->ucode_ver),
1274 IWL_UCODE_API(priv->ucode_ver),
1275 IWL_UCODE_SERIAL(priv->ucode_ver));
1276
1277 /*
1278 * For any of the failures below (before allocating pci memory)
1279 * we will try to load a version with a smaller API -- maybe the
1280 * user just got a corrupted version of the latest API.
1281 */
1282
1283 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1284 priv->ucode_ver);
1285 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
1286 pieces.inst_size);
1287 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
1288 pieces.data_size);
1289 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
1290 pieces.init_size);
1291 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1292 pieces.init_data_size);
1293 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
1294 pieces.boot_size);
1295
1296 /* Verify that uCode images will fit in card's SRAM */
1297 if (pieces.inst_size > priv->hw_params.max_inst_size) {
1298 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1299 pieces.inst_size);
1300 goto try_again;
1301 }
1302
1303 if (pieces.data_size > priv->hw_params.max_data_size) {
1304 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1305 pieces.data_size);
1306 goto try_again;
1307 }
1308
1309 if (pieces.init_size > priv->hw_params.max_inst_size) {
1310 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1311 pieces.init_size);
1312 goto try_again;
1313 }
1314
1315 if (pieces.init_data_size > priv->hw_params.max_data_size) {
1316 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1317 pieces.init_data_size);
1318 goto try_again;
1319 }
1320
1321 if (pieces.boot_size > priv->hw_params.max_bsm_size) {
1322 IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
1323 pieces.boot_size);
1324 goto try_again;
1325 }
1326
1327 /* Allocate ucode buffers for card's bus-master loading ... */
1328
1329 /* Runtime instructions and 2 copies of data:
1330 * 1) unmodified from disk
1331 * 2) backup cache for save/restore during power-downs */
1332 priv->ucode_code.len = pieces.inst_size;
1333 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
1334
1335 priv->ucode_data.len = pieces.data_size;
1336 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
1337
1338 priv->ucode_data_backup.len = pieces.data_size;
1339 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1340
1341 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
1342 !priv->ucode_data_backup.v_addr)
1343 goto err_pci_alloc;
1344
1345 /* Initialization instructions and data */
1346 if (pieces.init_size && pieces.init_data_size) {
1347 priv->ucode_init.len = pieces.init_size;
1348 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
1349
1350 priv->ucode_init_data.len = pieces.init_data_size;
1351 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1352
1353 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
1354 goto err_pci_alloc;
1355 }
1356
1357 /* Bootstrap (instructions only, no data) */
1358 if (pieces.boot_size) {
1359 priv->ucode_boot.len = pieces.boot_size;
1360 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
1361
1362 if (!priv->ucode_boot.v_addr)
1363 goto err_pci_alloc;
1364 }
1365
1366 /* Now that we can no longer fail, copy information */
1367
1368 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1369
1370 /* Copy images into buffers for card's bus-master reads ... */
1371
1372 /* Runtime instructions (first block of data in file) */
1373 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
1374 pieces.inst_size);
1375 memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
1376
1377 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
1378 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
1379
1380 /*
1381 * Runtime data
1382 * NOTE: Copy into backup buffer will be done in iwl_up()
1383 */
1384 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
1385 pieces.data_size);
1386 memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
1387 memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
1388
1389 /* Initialization instructions */
1390 if (pieces.init_size) {
1391 IWL_DEBUG_INFO(priv,
1392 "Copying (but not loading) init instr len %Zd\n",
1393 pieces.init_size);
1394 memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
1395 }
1396
1397 /* Initialization data */
1398 if (pieces.init_data_size) {
1399 IWL_DEBUG_INFO(priv,
1400 "Copying (but not loading) init data len %Zd\n",
1401 pieces.init_data_size);
1402 memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
1403 pieces.init_data_size);
1404 }
1405
1406 /* Bootstrap instructions */
1407 IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
1408 pieces.boot_size);
1409 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
1410
1411 /*
1412 * figure out the offset of chain noise reset and gain commands
1413 * base on the size of standard phy calibration commands table size
1414 */
1415 priv->_4965.phy_calib_chain_noise_reset_cmd =
1416 standard_phy_calibration_size;
1417 priv->_4965.phy_calib_chain_noise_gain_cmd =
1418 standard_phy_calibration_size + 1;
1419
1420 /**************************************************
1421 * This is still part of probe() in a sense...
1422 *
1423 * 9. Setup and register with mac80211 and debugfs
1424 **************************************************/
1425 err = iwl4965_mac_setup_register(priv, max_probe_length);
1426 if (err)
1427 goto out_unbind;
1428
1429 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
1430 if (err)
1431 IWL_ERR(priv,
1432 "failed to create debugfs files. Ignoring error: %d\n", err);
1433
1434 err = sysfs_create_group(&priv->pci_dev->dev.kobj,
1435 &iwl_attribute_group);
1436 if (err) {
1437 IWL_ERR(priv, "failed to create sysfs device attributes\n");
1438 goto out_unbind;
1439 }
1440
1441 /* We have our copies now, allow OS release its copies */
1442 release_firmware(ucode_raw);
1443 complete(&priv->_4965.firmware_loading_complete);
1444 return;
1445
1446 try_again:
1447 /* try next, if any */
1448 if (iwl4965_request_firmware(priv, false))
1449 goto out_unbind;
1450 release_firmware(ucode_raw);
1451 return;
1452
1453 err_pci_alloc:
1454 IWL_ERR(priv, "failed to allocate pci memory\n");
1455 iwl4965_dealloc_ucode_pci(priv);
1456 out_unbind:
1457 complete(&priv->_4965.firmware_loading_complete);
1458 device_release_driver(&priv->pci_dev->dev);
1459 release_firmware(ucode_raw);
1460}
1461
/*
 * uCode error descriptions, indexed directly by the error code read from
 * the device's error event table (see iwl4965_desc_lookup()).
 */
static const char * const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STABLE",
	"FH_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
1492
/*
 * Error descriptions for codes outside the dense range above, matched by
 * value.  The final "ADVANCED_SYSASSERT" entry is a catch-all sentinel
 * that iwl4965_desc_lookup() falls back to when nothing matches.
 *
 * NOTE(review): "NMI_TRM_HW_ERR" here vs "NMI_TRM_HW_ER" in
 * desc_lookup_text — likely the same condition with divergent spelling;
 * left untouched since these strings reach logs/tools.
 */
static struct { char *name; u8 num; } advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
1511
1512static const char *iwl4965_desc_lookup(u32 num)
1513{
1514 int i;
1515 int max = ARRAY_SIZE(desc_lookup_text);
1516
1517 if (num < max)
1518 return desc_lookup_text[num];
1519
1520 max = ARRAY_SIZE(advanced_lookup) - 1;
1521 for (i = 0; i < max; i++) {
1522 if (advanced_lookup[i].num == num)
1523 break;
1524 }
1525 return advanced_lookup[i].name;
1526}
1527
/* Layout of the on-device error event table: one u32 count word, then
 * 7-u32 records. */
#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

/*
 * Read the uCode error event table out of device SRAM and dump it to the
 * kernel log.  The table pointer comes from whichever ALIVE response
 * (init or runtime) matches the currently loaded uCode.
 */
void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
	}

	/* Sanity-check the pointer before issuing target-memory reads. */
	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	count = iwl_legacy_read_targ_mem(priv, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
			priv->status, count);
	}

	/* Fixed word offsets within the first error record. */
	desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
	priv->isr_stats.err_code = desc;
	pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
	blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
	blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
	ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
	ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
	data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
	data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
	line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
	time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
	hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));

	trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
					time, data1, data2, line,
					blink1, blink2, ilink1, ilink2);

	IWL_ERR(priv, "Desc                                  Time       "
		"data1      data2      line\n");
	IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
		iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
	IWL_ERR(priv, "pc      blink1  blink2  ilink1  ilink2  hcmd\n");
	IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
		pc, blink1, blink2, ilink1, ilink2, hcmd);
}
1584
1585static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1586{
1587 struct iwl_ct_kill_config cmd;
1588 unsigned long flags;
1589 int ret = 0;
1590
1591 spin_lock_irqsave(&priv->lock, flags);
1592 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1593 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1594 spin_unlock_irqrestore(&priv->lock, flags);
1595
1596 cmd.critical_temperature_R =
1597 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1598
1599 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1600 sizeof(cmd), &cmd);
1601 if (ret)
1602 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1603 else
1604 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1605 "succeeded, "
1606 "critical temperature is %d\n",
1607 priv->hw_params.ct_kill_threshold);
1608}
1609
/*
 * Default Tx-queue -> FIFO mapping (index = queue number): the four EDCA
 * ACs, the command FIFO, and two unused slots.  Must stay exactly 7
 * entries long (see BUILD_BUG_ON in iwl4965_alive_notify()).
 */
static const s8 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWL49_CMD_FIFO_NUM,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
};
1619
/*
 * Post-ALIVE scheduler bring-up: wipe the Tx scheduler's SRAM context,
 * point it at the byte-count tables, enable the Tx DMA channels and
 * configure every Tx/command queue.  Runs entirely under priv->lock with
 * the exact register ordering the hardware expects.
 */
static int iwl4965_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	priv->scd_base_addr = iwl_legacy_read_prph(priv,
					IWL49_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
			priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
		iwl_legacy_write_direct32(priv,
				FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write indexes */
		iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
		iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				(SCD_WIN_SIZE <<
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				(SCD_FRAME_LIMIT <<
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
				 (1 << priv->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));

	iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	priv->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		iwl_txq_ctx_activate(priv, i);

		/* Unused slots stay activated but get no FIFO status. */
		if (ac == IWL_TX_FIFO_UNUSED)
			continue;

		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
1716
1717/**
1718 * iwl4965_alive_start - called after REPLY_ALIVE notification received
1719 * from protocol/runtime uCode (initialization uCode's
1720 * Alive gets handled by iwl_init_alive_start()).
1721 */
1722static void iwl4965_alive_start(struct iwl_priv *priv)
1723{
1724 int ret = 0;
1725 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1726
1727 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
1728
1729 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
1730 /* We had an error bringing up the hardware, so take it
1731 * all the way back down so we can try again */
1732 IWL_DEBUG_INFO(priv, "Alive failed.\n");
1733 goto restart;
1734 }
1735
1736 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
1737 * This is a paranoid check, because we would not have gotten the
1738 * "runtime" alive if code weren't properly loaded. */
1739 if (iwl4965_verify_ucode(priv)) {
1740 /* Runtime instruction load was bad;
1741 * take it all the way back down so we can try again */
1742 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
1743 goto restart;
1744 }
1745
1746 ret = iwl4965_alive_notify(priv);
1747 if (ret) {
1748 IWL_WARN(priv,
1749 "Could not complete ALIVE transition [ntf]: %d\n", ret);
1750 goto restart;
1751 }
1752
1753
1754 /* After the ALIVE response, we can send host commands to the uCode */
1755 set_bit(STATUS_ALIVE, &priv->status);
1756
1757 /* Enable watchdog to monitor the driver tx queues */
1758 iwl_legacy_setup_watchdog(priv);
1759
1760 if (iwl_legacy_is_rfkill(priv))
1761 return;
1762
1763 ieee80211_wake_queues(priv->hw);
1764
1765 priv->active_rate = IWL_RATES_MASK;
1766
1767 if (iwl_legacy_is_associated_ctx(ctx)) {
1768 struct iwl_legacy_rxon_cmd *active_rxon =
1769 (struct iwl_legacy_rxon_cmd *)&ctx->active;
1770 /* apply any changes in staging */
1771 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
1772 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1773 } else {
1774 struct iwl_rxon_context *tmp;
1775 /* Initialize our rx_config data */
1776 for_each_context(priv, tmp)
1777 iwl_legacy_connection_init_rx_config(priv, tmp);
1778
1779 if (priv->cfg->ops->hcmd->set_rxon_chain)
1780 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1781 }
1782
1783 /* Configure bluetooth coexistence if enabled */
1784 iwl_legacy_send_bt_config(priv);
1785
1786 iwl4965_reset_run_time_calib(priv);
1787
1788 set_bit(STATUS_READY, &priv->status);
1789
1790 /* Configure the adapter for unassociated operation */
1791 iwl_legacy_commit_rxon(priv, ctx);
1792
1793 /* At this point, the NIC is initialized and operational */
1794 iwl4965_rf_kill_ct_config(priv);
1795
1796 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
1797 wake_up(&priv->wait_command_queue);
1798
1799 iwl_legacy_power_update_mode(priv, true);
1800 IWL_DEBUG_INFO(priv, "Updated power mode\n");
1801
1802 return;
1803
1804 restart:
1805 queue_work(priv->workqueue, &priv->restart);
1806}
1807
static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);

/*
 * Take the NIC all the way down: cancel scans, stop the watchdog, drop
 * stations, quiesce interrupts, stop Tx/Rx DMA and power the device off.
 * Must be called with priv->mutex held (see iwl4965_down()).  Teardown
 * ordering is deliberate; preserve it when modifying.
 */
static void __iwl4965_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending;

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	iwl_legacy_scan_cancel_timeout(priv, 200);

	exit_pending =
		test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&priv->watchdog);

	iwl_legacy_clear_ucode_stations(priv, NULL);
	iwl_legacy_dealloc_bcast_stations(priv);
	iwl_legacy_clear_driver_stations(priv);

	/* Unblock any waiting calls */
	wake_up_all(&priv->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl4965_synchronize_irq(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!iwl_legacy_is_init(priv)) {
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
					STATUS_GEO_CONFIGURED |
			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
					STATUS_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
		       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	iwl4965_txq_ctx_stop(priv);
	iwl4965_rxq_stop(priv);

	/* Power-down device's busmaster DMA clocks */
	iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_legacy_apm_stop(priv);

 exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	/* dev_kfree_skb(NULL) is a no-op, so this is safe when no beacon
	 * skb is held. */
	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;

	/* clear out any free frames */
	iwl4965_clear_free_frames(priv);
}
1895
/*
 * Locked wrapper around __iwl4965_down().  Deferred work is cancelled
 * outside the mutex: work items may themselves take priv->mutex.
 */
static void iwl4965_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl4965_down(priv);
	mutex_unlock(&priv->mutex);

	iwl4965_cancel_deferred_work(priv);
}
1904
1905#define HW_READY_TIMEOUT (50)
1906
1907static int iwl4965_set_hw_ready(struct iwl_priv *priv)
1908{
1909 int ret = 0;
1910
1911 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1912 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1913
1914 /* See if we got it */
1915 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
1916 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1917 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1918 HW_READY_TIMEOUT);
1919 if (ret != -ETIMEDOUT)
1920 priv->hw_ready = true;
1921 else
1922 priv->hw_ready = false;
1923
1924 IWL_DEBUG_INFO(priv, "hardware %s\n",
1925 (priv->hw_ready == 1) ? "ready" : "not ready");
1926 return ret;
1927}
1928
/*
 * Bring the card hardware to the ready state.  If the first readiness
 * probe fails, assert the PREPARE handshake, wait (up to 150 ms) for
 * PREPARE_DONE, then probe once more.  The result lands in
 * priv->hw_ready; callers check that rather than the return value.
 */
static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
{
	int ret = 0;

	IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");

	ret = iwl4965_set_hw_ready(priv);
	if (priv->hw_ready)
		return ret;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	/* HW should be ready by now, check again. */
	if (ret != -ETIMEDOUT)
		iwl4965_set_hw_ready(priv);

	return ret;
}
1953
#define MAX_HW_RESTARTS 5

/*
 * Bring the NIC up: allocate broadcast stations, verify hardware
 * readiness and rfkill state, init the NIC, then try up to
 * MAX_HW_RESTARTS times to load the bootstrap uCode and start the card.
 * Returns 0 once the card is coming up (or rfkill is asserted), a
 * negative error otherwise.  Must be called with priv->mutex held.
 */
static int __iwl4965_up(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int i;
	int ret;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
		IWL_ERR(priv, "ucode not available for device bringup\n");
		return -EIO;
	}

	for_each_context(priv, ctx) {
		ret = iwl4965_alloc_bcast_station(priv, ctx);
		if (ret) {
			/* Roll back any stations already allocated. */
			iwl_legacy_dealloc_bcast_stations(priv);
			return ret;
		}
	}

	iwl4965_prepare_card_hw(priv);

	if (!priv->hw_ready) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv,
		CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	/* rfkill asserted: report it and stop here — not an error. */
	if (iwl_legacy_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);

		iwl_legacy_enable_interrupts(priv);
		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before iwl_hw_nic_init */
	priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;

	ret = iwl4965_hw_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
	       priv->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = priv->cfg->ops->lib->load_ucode(priv);

		if (ret) {
			IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
				ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		iwl4965_nic_start(priv);

		IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");

		return 0;
	}

	set_bit(STATUS_EXIT_PENDING, &priv->status);
	__iwl4965_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
2062
2063
2064/*****************************************************************************
2065 *
2066 * Workqueue callbacks
2067 *
2068 *****************************************************************************/
2069
2070static void iwl4965_bg_init_alive_start(struct work_struct *data)
2071{
2072 struct iwl_priv *priv =
2073 container_of(data, struct iwl_priv, init_alive_start.work);
2074
2075 mutex_lock(&priv->mutex);
2076 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2077 goto out;
2078
2079 priv->cfg->ops->lib->init_alive_start(priv);
2080out:
2081 mutex_unlock(&priv->mutex);
2082}
2083
2084static void iwl4965_bg_alive_start(struct work_struct *data)
2085{
2086 struct iwl_priv *priv =
2087 container_of(data, struct iwl_priv, alive_start.work);
2088
2089 mutex_lock(&priv->mutex);
2090 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2091 goto out;
2092
2093 iwl4965_alive_start(priv);
2094out:
2095 mutex_unlock(&priv->mutex);
2096}
2097
2098static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
2099{
2100 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2101 run_time_calib_work);
2102
2103 mutex_lock(&priv->mutex);
2104
2105 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2106 test_bit(STATUS_SCANNING, &priv->status)) {
2107 mutex_unlock(&priv->mutex);
2108 return;
2109 }
2110
2111 if (priv->start_calib) {
2112 iwl4965_chain_noise_calibration(priv,
2113 (void *)&priv->_4965.statistics);
2114 iwl4965_sensitivity_calibration(priv,
2115 (void *)&priv->_4965.statistics);
2116 }
2117
2118 mutex_unlock(&priv->mutex);
2119}
2120
/*
 * iwl4965_bg_restart - deferred full restart of the device
 *
 * Two flavors: after a firmware error (STATUS_FW_ERROR) the vifs are
 * detached, the device is taken down, and mac80211 is asked to
 * restart the hw; any other restart request is handled as a plain
 * down/up cycle done here.
 */
static void iwl4965_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
		struct iwl_rxon_context *ctx;

		mutex_lock(&priv->mutex);
		/* mac80211 re-adds interfaces after restart_hw */
		for_each_context(priv, ctx)
			ctx->vif = NULL;
		priv->is_open = 0;

		__iwl4965_down(priv);

		mutex_unlock(&priv->mutex);
		/* cancel work only after dropping the mutex -- some
		 * work items take priv->mutex themselves */
		iwl4965_cancel_deferred_work(priv);
		ieee80211_restart_hw(priv->hw);
	} else {
		iwl4965_down(priv);

		mutex_lock(&priv->mutex);
		/* a shutdown may have raced with us while unlocked */
		if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
			mutex_unlock(&priv->mutex);
			return;
		}

		__iwl4965_up(priv);
		mutex_unlock(&priv->mutex);
	}
}
2154
/*
 * iwl4965_bg_rx_replenish - deferred refill of the RX buffer pool.
 * Skipped when a shutdown is pending.
 */
static void iwl4965_bg_rx_replenish(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, rx_replenish);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	iwl4965_rx_replenish(priv);
	mutex_unlock(&priv->mutex);
}
2167
2168/*****************************************************************************
2169 *
2170 * mac80211 entry point functions
2171 *
2172 *****************************************************************************/
2173
#define UCODE_READY_TIMEOUT	(4 * HZ)

/*
 * Not a mac80211 entry point function, but it fits in with all the
 * other mac80211 functions grouped here.
 *
 * Advertises the device's capabilities to mac80211 and registers the
 * hw. @max_probe_length is the largest probe request the uCode
 * accepts; the advertised IE space is that minus the 24-byte 802.11
 * header and the 2-byte zero-length SSID element built by the driver.
 * Returns 0 on success or the ieee80211_register_hw() error.
 */
static int iwl4965_mac_setup_register(struct iwl_priv *priv,
				  u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;
	struct iwl_rxon_context *ctx;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_NEED_DTIM_PERIOD |
		    IEEE80211_HW_SPECTRUM_MGMT |
		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/* SMPS is advertised only on 11n-capable SKUs */
	if (priv->cfg->sku & IWL_SKU_N)
		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct iwl_station_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	/* advertise the union of every context's interface modes */
	for_each_context(priv, ctx) {
		hw->wiphy->interface_modes |= ctx->interface_modes;
		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
	}

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	/* expose only the bands that have channels enabled */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];
	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	iwl_legacy_leds_init(priv);

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
2244
2245
/*
 * iwl4965_mac_start - mac80211 start() callback
 *
 * Brings the NIC up under priv->mutex, then waits up to
 * UCODE_READY_TIMEOUT for the runtime uCode to report ALIVE
 * (STATUS_READY); mac80211 callbacks cannot be serviced before that.
 * Returns 0 on success -- including when rfkill is active, in which
 * case the interface is marked open but idle -- or a negative errno.
 */
int iwl4965_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);
	ret = __iwl4965_up(priv);
	mutex_unlock(&priv->mutex);

	if (ret)
		return ret;

	if (iwl_legacy_is_rfkill(priv))
		goto out;

	IWL_DEBUG_INFO(priv, "Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(priv->wait_command_queue,
			test_bit(STATUS_READY, &priv->status),
			UCODE_READY_TIMEOUT);
	if (!ret) {
		/* re-check: READY may have been set just as we timed out */
		if (!test_bit(STATUS_READY, &priv->status)) {
			IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	iwl4965_led_enable(priv);

out:
	priv->is_open = 1;
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
2286
/*
 * iwl4965_mac_stop - mac80211 stop() callback
 *
 * Takes the device down and flushes all pending deferred work.
 * Afterwards, the rfkill interrupt is re-enabled so user space keeps
 * seeing rfkill state changes while the interface is down.
 */
void iwl4965_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open)
		return;

	priv->is_open = 0;

	iwl4965_down(priv);

	flush_workqueue(priv->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_rfkill_int(priv);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2309
/*
 * iwl4965_mac_tx - mac80211 tx() callback
 *
 * Hands the frame to the 4965 TX path; if iwl4965_tx_skb() rejects
 * the frame, the skb is freed here since ownership has already
 * passed to the driver.
 */
void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MACDUMP(priv, "enter\n");

	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (iwl4965_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	IWL_DEBUG_MACDUMP(priv, "leave\n");
}
2324
/*
 * iwl4965_mac_update_tkip_key - mac80211 update_tkip_key() callback
 *
 * Pushes the newly derived TKIP phase-1 key for @iv32 down to the
 * device so hardware decryption continues without a full rekey.
 */
void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			struct ieee80211_key_conf *keyconf,
			struct ieee80211_sta *sta,
			u32 iv32, u16 *phase1key)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
			iv32, phase1key);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2341
2342int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2343 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2344 struct ieee80211_key_conf *key)
2345{
2346 struct iwl_priv *priv = hw->priv;
2347 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2348 struct iwl_rxon_context *ctx = vif_priv->ctx;
2349 int ret;
2350 u8 sta_id;
2351 bool is_default_wep_key = false;
2352
2353 IWL_DEBUG_MAC80211(priv, "enter\n");
2354
2355 if (priv->cfg->mod_params->sw_crypto) {
2356 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2357 return -EOPNOTSUPP;
2358 }
2359
2360 sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
2361 if (sta_id == IWL_INVALID_STATION)
2362 return -EINVAL;
2363
2364 mutex_lock(&priv->mutex);
2365 iwl_legacy_scan_cancel_timeout(priv, 100);
2366
2367 /*
2368 * If we are getting WEP group key and we didn't receive any key mapping
2369 * so far, we are in legacy wep mode (group key only), otherwise we are
2370 * in 1X mode.
2371 * In legacy wep mode, we use another host command to the uCode.
2372 */
2373 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2374 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
2375 !sta) {
2376 if (cmd == SET_KEY)
2377 is_default_wep_key = !ctx->key_mapping_keys;
2378 else
2379 is_default_wep_key =
2380 (key->hw_key_idx == HW_KEY_DEFAULT);
2381 }
2382
2383 switch (cmd) {
2384 case SET_KEY:
2385 if (is_default_wep_key)
2386 ret = iwl4965_set_default_wep_key(priv,
2387 vif_priv->ctx, key);
2388 else
2389 ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
2390 key, sta_id);
2391
2392 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
2393 break;
2394 case DISABLE_KEY:
2395 if (is_default_wep_key)
2396 ret = iwl4965_remove_default_wep_key(priv, ctx, key);
2397 else
2398 ret = iwl4965_remove_dynamic_key(priv, ctx,
2399 key, sta_id);
2400
2401 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
2402 break;
2403 default:
2404 ret = -EINVAL;
2405 }
2406
2407 mutex_unlock(&priv->mutex);
2408 IWL_DEBUG_MAC80211(priv, "leave\n");
2409
2410 return ret;
2411}
2412
/*
 * iwl4965_mac_ampdu_action - mac80211 ampdu_action() callback
 *
 * Starts/stops RX and TX aggregation sessions per (sta, tid).
 * Returns -EACCES on non-11n SKUs. Stop requests issued while a
 * shutdown is pending are reported as success since the session is
 * being torn down anyway. Actions not handled by the switch fall
 * through with the initial -EINVAL.
 */
int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
		    struct ieee80211_vif *vif,
		    enum ieee80211_ampdu_mlme_action action,
		    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
		    u8 buf_size)
{
	struct iwl_priv *priv = hw->priv;
	int ret = -EINVAL;

	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
		     sta->addr, tid);

	if (!(priv->cfg->sku & IWL_SKU_N))
		return -EACCES;

	mutex_lock(&priv->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		IWL_DEBUG_HT(priv, "start Rx\n");
		ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		IWL_DEBUG_HT(priv, "stop Rx\n");
		ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
		/* shutting down anyway: report the stop as success */
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		IWL_DEBUG_HT(priv, "start Tx\n");
		ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP:
		IWL_DEBUG_HT(priv, "stop Tx\n");
		ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
		/* shutting down anyway: report the stop as success */
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* nothing to program here */
		ret = 0;
		break;
	}
	mutex_unlock(&priv->mutex);

	return ret;
}
2459
/*
 * iwl4965_mac_sta_add - mac80211 sta_add() callback
 *
 * Adds a station entry to the uCode station table and initializes
 * rate scaling for it. Returns 0 on success or the error from
 * iwl_legacy_add_station_common().
 */
int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
		struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	/* NOTE(review): "is_ap" appears to mean "the peer is our AP",
	 * i.e. we operate as a station -- confirm against
	 * iwl_legacy_add_station_common()'s expectations */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
			sta->addr);
	mutex_lock(&priv->mutex);
	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
			sta->addr);
	/* invalid until the uCode table entry exists */
	sta_priv->common.sta_id = IWL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
				      is_ap, sta, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&priv->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
		       sta->addr);
	iwl4965_rs_rate_init(priv, sta, sta_id);
	mutex_unlock(&priv->mutex);

	return 0;
}
2500
/*
 * iwl4965_mac_channel_switch - mac80211 channel_switch() callback
 *
 * Prepares the staging RXON and HT40 configuration for the target
 * channel and hands the switch to the uCode via set_channel_switch.
 * Bails out silently when rfkill, pending shutdown, a scan, an
 * already-pending switch, a non-associated context, an unsupported
 * cfg, the current channel, or an invalid channel makes the switch
 * impossible. If the uCode rejects the switch, the pending flag is
 * cleared and mac80211 is told the switch failed.
 */
void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
		       struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u16 ch;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	if (iwl_legacy_is_rfkill(priv))
		goto out;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status) ||
	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		goto out;

	if (!iwl_legacy_is_associated_ctx(ctx))
		goto out;

	if (!priv->cfg->ops->lib->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* already on the requested channel: nothing to do */
	if (le16_to_cpu(ctx->active.channel) == ch)
		goto out;

	ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
	if (!iwl_legacy_is_channel_valid(ch_info)) {
		IWL_DEBUG_MAC80211(priv, "invalid channel\n");
		goto out;
	}

	/* staging RXON / HT config are protected by priv->lock */
	spin_lock_irq(&priv->lock);

	priv->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	ctx->ht.enabled = conf_is_ht(conf);
	if (ctx->ht.enabled) {
		if (conf_is_ht40_minus(conf)) {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_BELOW;
			ctx->ht.is_40mhz = true;
		} else if (conf_is_ht40_plus(conf)) {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
			ctx->ht.is_40mhz = true;
		} else {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_NONE;
			ctx->ht.is_40mhz = false;
		}
	} else
		ctx->ht.is_40mhz = false;

	/* a channel change invalidates the previously staged flags */
	if ((le16_to_cpu(ctx->staging.channel) != ch))
		ctx->staging.flags = 0;

	iwl_legacy_set_rxon_channel(priv, channel, ctx);
	iwl_legacy_set_rxon_ht(priv, ht_conf);
	iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif);

	spin_unlock_irq(&priv->lock);

	iwl_legacy_set_rate(priv);
	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
	priv->switch_channel = cpu_to_le16(ch);
	if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
		/* uCode refused: undo the pending state, report failure */
		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
		priv->switch_channel = 0;
		ieee80211_chswitch_done(ctx->vif, false);
	}

out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2590
/*
 * iwl4965_configure_filter - mac80211 configure_filter() callback
 *
 * Translates mac80211 FIF_* filter flags into RXON filter bits and
 * stores them in every context's staging RXON. The change is not
 * committed to the device here (a scan may be in progress); it takes
 * effect with the next RXON commit. *total_flags is masked down to
 * the subset the driver honors.
 */
void iwl4965_configure_filter(struct ieee80211_hw *hw,
		     unsigned int changed_flags,
		     unsigned int *total_flags,
		     u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx;

/* Collect RXON bits to set (FIF flag requested) or clear (absent) */
#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	for_each_context(priv, ctx) {
		ctx->staging.filter_flags &= ~filter_nand;
		ctx->staging.filter_flags |= filter_or;

		/*
		 * Not committing directly because hardware can perform a scan,
		 * but we'll eventually commit the filter flags change anyway.
		 */
	}

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_legacy_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
2640
2641/*****************************************************************************
2642 *
2643 * driver setup and teardown
2644 *
2645 *****************************************************************************/
2646
2647static void iwl4965_bg_txpower_work(struct work_struct *work)
2648{
2649 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2650 txpower_work);
2651
2652 mutex_lock(&priv->mutex);
2653
2654 /* If a scan happened to start before we got here
2655 * then just return; the statistics notification will
2656 * kick off another scheduled work to compensate for
2657 * any temperature delta we missed here. */
2658 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2659 test_bit(STATUS_SCANNING, &priv->status))
2660 goto out;
2661
2662 /* Regardless of if we are associated, we must reconfigure the
2663 * TX power since frames can be sent on non-radar channels while
2664 * not associated */
2665 priv->cfg->ops->lib->send_tx_power(priv);
2666
2667 /* Update last_temperature to keep is_calib_needed from running
2668 * when it isn't needed... */
2669 priv->last_temperature = priv->temperature;
2670out:
2671 mutex_unlock(&priv->mutex);
2672}
2673
/*
 * iwl4965_setup_deferred_work - create the driver workqueue and
 * register all deferred work items, timers, and the IRQ tasklet.
 *
 * NOTE(review): the create_singlethread_workqueue() result is not
 * checked for NULL here -- confirm downstream users tolerate that.
 */
static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
{
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->restart, iwl4965_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
	INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);

	iwl_legacy_setup_scan_deferred_work(priv);

	INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);

	/* periodic statistics timer */
	init_timer(&priv->statistics_periodic);
	priv->statistics_periodic.data = (unsigned long)priv;
	priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;

	/* stuck-queue watchdog timer */
	init_timer(&priv->watchdog);
	priv->watchdog.data = (unsigned long)priv;
	priv->watchdog.function = iwl_legacy_bg_watchdog;

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl4965_irq_tasklet, (unsigned long)priv);
}
2701
/*
 * iwl4965_cancel_deferred_work - stop deferred work for down/restart.
 *
 * NOTE(review): alive_start is canceled WITHOUT _sync, unlike its
 * siblings -- presumably because this can be reached from a work item
 * itself during restart; verify there is no use-after-cancel race.
 * The watchdog timer is not deleted here; presumably handled on
 * another teardown path -- confirm.
 */
static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->txpower_work);
	cancel_delayed_work_sync(&priv->init_alive_start);
	cancel_delayed_work(&priv->alive_start);
	cancel_work_sync(&priv->run_time_calib_work);

	iwl_legacy_cancel_scan_deferred_work(priv);

	del_timer_sync(&priv->statistics_periodic);
}
2713
2714static void iwl4965_init_hw_rates(struct iwl_priv *priv,
2715 struct ieee80211_rate *rates)
2716{
2717 int i;
2718
2719 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
2720 rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
2721 rates[i].hw_value = i; /* Rate scaling will work on indexes */
2722 rates[i].hw_value_short = i;
2723 rates[i].flags = 0;
2724 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
2725 /*
2726 * If CCK != 1M then set short preamble rate flag.
2727 */
2728 rates[i].flags |=
2729 (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
2730 0 : IEEE80211_RATE_SHORT_PREAMBLE;
2731 }
2732 }
2733}
/*
 * iwl4965_set_wr_ptrs - move both driver-side and scheduler-side
 * write pointers of TX queue @txq_id to @index.
 *
 * Acquire priv->lock before calling this function !
 */
void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	/* HBUS_TARG_WRPTR packs the queue id into bits 8+ and the
	 * index into the low byte */
	iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}
2743
/*
 * iwl4965_tx_queue_set_status - program a TX queue's scheduler status
 *
 * (De)activates the queue according to priv->txq_ctx_active_msk,
 * binds it to TX FIFO @tx_fifo_id, and sets @scd_retry to select
 * aggregation (BA) scheduling for it.
 */
void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
		    struct iwl_tx_queue *txq,
		    int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
			(active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
			(scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
			(scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
			IWL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
2767
2768
/*
 * iwl4965_init_drv - initialize driver-private state
 *
 * Sets up locks, list heads and default settings, selects the RX
 * chain, and builds the regulatory channel map and geo/band tables.
 * Returns 0 on success; on failure the channel map acquired so far
 * is released via the goto-unwind labels and the error is returned.
 */
static int iwl4965_init_drv(struct iwl_priv *priv)
{
	int ret;

	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);

	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);

	iwl_legacy_init_scan_params(priv);

	ret = iwl_legacy_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwl_legacy_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	/* populate the legacy rate table backing priv->ieee_rates */
	iwl4965_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_legacy_free_channel_map(priv);
err:
	return ret;
}
2818
/*
 * iwl4965_uninit_drv - undo iwl4965_init_drv: free calibration
 * results, geo tables, the channel map, and the scan command buffer.
 */
static void iwl4965_uninit_drv(struct iwl_priv *priv)
{
	iwl4965_calib_free_results(priv);
	iwl_legacy_free_geos(priv);
	iwl_legacy_free_channel_map(priv);
	kfree(priv->scan_cmd);
}
2826
/*
 * iwl4965_hw_detect - cache hardware revision identifiers from the
 * CSR registers and the PCI config space revision byte.
 */
static void iwl4965_hw_detect(struct iwl_priv *priv)
{
	priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
	priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
	priv->rev_id = priv->pci_dev->revision;
	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
}
2834
/*
 * iwl4965_set_hw_params - set generic HW parameters (RX queue sizes,
 * RX buffer page order per the amsdu_size_8K module parameter, max
 * beacon interval, 11n enablement per disable_11n), then delegate to
 * the device-specific set_hw_params hook.
 */
static int iwl4965_set_hw_params(struct iwl_priv *priv)
{
	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	if (priv->cfg->mod_params->amsdu_size_8K)
		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
	else
		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);

	priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;

	if (priv->cfg->mod_params->disable_11n)
		priv->cfg->sku &= ~IWL_SKU_N;

	/* Device-specific setup */
	return priv->cfg->ops->lib->set_hw_params(priv);
}
2852
/* Map mac80211 AC index (VO, VI, BE, BK) to 4965 TX FIFO for the
 * BSS context */
static const u8 iwl4965_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};

/* Map mac80211 AC index to TX queue number for the BSS context */
static const u8 iwl4965_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
2863
2864static int
2865iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2866{
2867 int err = 0, i;
2868 struct iwl_priv *priv;
2869 struct ieee80211_hw *hw;
2870 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
2871 unsigned long flags;
2872 u16 pci_cmd;
2873
2874 /************************
2875 * 1. Allocating HW data
2876 ************************/
2877
2878 hw = iwl_legacy_alloc_all(cfg);
2879 if (!hw) {
2880 err = -ENOMEM;
2881 goto out;
2882 }
2883 priv = hw->priv;
2884 /* At this point both hw and priv are allocated. */
2885
2886 /*
2887 * The default context is always valid,
2888 * more may be discovered when firmware
2889 * is loaded.
2890 */
2891 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
2892
2893 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
2894 priv->contexts[i].ctxid = i;
2895
2896 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
2897 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
2898 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
2899 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
2900 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
2901 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
2902 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
2903 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
2904 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
2905 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
2906 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
2907 BIT(NL80211_IFTYPE_ADHOC);
2908 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
2909 BIT(NL80211_IFTYPE_STATION);
2910 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
2911 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
2912 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
2913 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
2914
2915 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
2916
2917 SET_IEEE80211_DEV(hw, &pdev->dev);
2918
2919 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
2920 priv->cfg = cfg;
2921 priv->pci_dev = pdev;
2922 priv->inta_mask = CSR_INI_SET_MASK;
2923
2924 if (iwl_legacy_alloc_traffic_mem(priv))
2925 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
2926
2927 /**************************
2928 * 2. Initializing PCI bus
2929 **************************/
2930 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2931 PCIE_LINK_STATE_CLKPM);
2932
2933 if (pci_enable_device(pdev)) {
2934 err = -ENODEV;
2935 goto out_ieee80211_free_hw;
2936 }
2937
2938 pci_set_master(pdev);
2939
2940 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
2941 if (!err)
2942 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
2943 if (err) {
2944 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2945 if (!err)
2946 err = pci_set_consistent_dma_mask(pdev,
2947 DMA_BIT_MASK(32));
2948 /* both attempts failed: */
2949 if (err) {
2950 IWL_WARN(priv, "No suitable DMA available.\n");
2951 goto out_pci_disable_device;
2952 }
2953 }
2954
2955 err = pci_request_regions(pdev, DRV_NAME);
2956 if (err)
2957 goto out_pci_disable_device;
2958
2959 pci_set_drvdata(pdev, priv);
2960
2961
2962 /***********************
2963 * 3. Read REV register
2964 ***********************/
2965 priv->hw_base = pci_iomap(pdev, 0, 0);
2966 if (!priv->hw_base) {
2967 err = -ENODEV;
2968 goto out_pci_release_regions;
2969 }
2970
2971 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
2972 (unsigned long long) pci_resource_len(pdev, 0));
2973 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
2974
2975 /* these spin locks will be used in apm_ops.init and EEPROM access
2976 * we should init now
2977 */
2978 spin_lock_init(&priv->reg_lock);
2979 spin_lock_init(&priv->lock);
2980
2981 /*
2982 * stop and reset the on-board processor just in case it is in a
2983 * strange state ... like being left stranded by a primary kernel
2984 * and this is now the kdump kernel trying to start up
2985 */
2986 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2987
2988 iwl4965_hw_detect(priv);
2989 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
2990 priv->cfg->name, priv->hw_rev);
2991
2992 /* We disable the RETRY_TIMEOUT register (0x41) to keep
2993 * PCI Tx retries from interfering with C3 CPU state */
2994 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2995
2996 iwl4965_prepare_card_hw(priv);
2997 if (!priv->hw_ready) {
2998 IWL_WARN(priv, "Failed, HW not ready\n");
2999 goto out_iounmap;
3000 }
3001
3002 /*****************
3003 * 4. Read EEPROM
3004 *****************/
3005 /* Read the EEPROM */
3006 err = iwl_legacy_eeprom_init(priv);
3007 if (err) {
3008 IWL_ERR(priv, "Unable to init EEPROM\n");
3009 goto out_iounmap;
3010 }
3011 err = iwl4965_eeprom_check_version(priv);
3012 if (err)
3013 goto out_free_eeprom;
3014
3015 if (err)
3016 goto out_free_eeprom;
3017
3018 /* extract MAC Address */
3019 iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
3020 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3021 priv->hw->wiphy->addresses = priv->addresses;
3022 priv->hw->wiphy->n_addresses = 1;
3023
3024 /************************
3025 * 5. Setup HW constants
3026 ************************/
3027 if (iwl4965_set_hw_params(priv)) {
3028 IWL_ERR(priv, "failed to set hw parameters\n");
3029 goto out_free_eeprom;
3030 }
3031
3032 /*******************
3033 * 6. Setup priv
3034 *******************/
3035
3036 err = iwl4965_init_drv(priv);
3037 if (err)
3038 goto out_free_eeprom;
3039 /* At this point both hw and priv are initialized. */
3040
3041 /********************
3042 * 7. Setup services
3043 ********************/
3044 spin_lock_irqsave(&priv->lock, flags);
3045 iwl_legacy_disable_interrupts(priv);
3046 spin_unlock_irqrestore(&priv->lock, flags);
3047
3048 pci_enable_msi(priv->pci_dev);
3049
3050 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
3051 IRQF_SHARED, DRV_NAME, priv);
3052 if (err) {
3053 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3054 goto out_disable_msi;
3055 }
3056
3057 iwl4965_setup_deferred_work(priv);
3058 iwl4965_setup_rx_handlers(priv);
3059
3060 /*********************************************
3061 * 8. Enable interrupts and read RFKILL state
3062 *********************************************/
3063
3064 /* enable rfkill interrupt: hw bug w/a */
3065 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3066 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3067 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3068 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3069 }
3070
3071 iwl_legacy_enable_rfkill_int(priv);
3072
3073 /* If platform's RF_KILL switch is NOT set to KILL */
3074 if (iwl_read32(priv, CSR_GP_CNTRL) &
3075 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3076 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3077 else
3078 set_bit(STATUS_RF_KILL_HW, &priv->status);
3079
3080 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3081 test_bit(STATUS_RF_KILL_HW, &priv->status));
3082
3083 iwl_legacy_power_initialize(priv);
3084
3085 init_completion(&priv->_4965.firmware_loading_complete);
3086
3087 err = iwl4965_request_firmware(priv, true);
3088 if (err)
3089 goto out_destroy_workqueue;
3090
3091 return 0;
3092
3093 out_destroy_workqueue:
3094 destroy_workqueue(priv->workqueue);
3095 priv->workqueue = NULL;
3096 free_irq(priv->pci_dev->irq, priv);
3097 out_disable_msi:
3098 pci_disable_msi(priv->pci_dev);
3099 iwl4965_uninit_drv(priv);
3100 out_free_eeprom:
3101 iwl_legacy_eeprom_free(priv);
3102 out_iounmap:
3103 pci_iounmap(pdev, priv->hw_base);
3104 out_pci_release_regions:
3105 pci_set_drvdata(pdev, NULL);
3106 pci_release_regions(pdev);
3107 out_pci_disable_device:
3108 pci_disable_device(pdev);
3109 out_ieee80211_free_hw:
3110 iwl_legacy_free_traffic_mem(priv);
3111 ieee80211_free_hw(priv->hw);
3112 out:
3113 return err;
3114}
3115
/*
 * PCI remove callback: tear down everything iwl4965_pci_probe() set up,
 * in reverse dependency order (mac80211 → device → IRQ/MSI → PCI → hw).
 * The ordering below is deliberate and must not be rearranged.
 */
static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	unsigned long flags;

	/* Probe failed before drvdata was set; nothing to undo. */
	if (!priv)
		return;

	/* Probe returns before the async firmware load completes;
	 * wait here so we never tear down under a loading firmware. */
	wait_for_completion(&priv->_4965.firmware_loading_complete);

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwl_legacy_dbgfs_unregister(priv);
	sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);

	/* ieee80211_unregister_hw call will cause iwl_mac_stop to
	 * be called and iwl4965_down; since we are removing the device
	 * we need to set the STATUS_EXIT_PENDING bit first.
	 */
	set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_legacy_leds_exit(priv);

	/* If mac80211 registration succeeded, unregistering brings the
	 * device down via iwl_mac_stop; otherwise bring it down directly. */
	if (priv->mac80211_registered) {
		ieee80211_unregister_hw(priv->hw);
		priv->mac80211_registered = 0;
	} else {
		iwl4965_down(priv);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with iwl4965_down(), but there are paths to
	 * run iwl4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running iwl4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	iwl_legacy_apm_stop(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_synchronize_irq(priv);

	iwl4965_dealloc_ucode_pci(priv);

	/* rxq.bd is only non-NULL if the Rx queue was ever allocated. */
	if (priv->rxq.bd)
		iwl4965_rx_queue_free(priv, &priv->rxq);
	iwl4965_hw_txq_ctx_free(priv);

	iwl_legacy_eeprom_free(priv);


	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_legacy_free_traffic_mem(priv);

	/* IRQ/MSI teardown must follow interrupt disable + sync above. */
	free_irq(priv->pci_dev->irq, priv);
	pci_disable_msi(priv->pci_dev);
	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	iwl4965_uninit_drv(priv);

	/* dev_kfree_skb() is NULL-safe, so no guard is needed here. */
	dev_kfree_skb(priv->beacon_skb);

	ieee80211_free_hw(priv->hw);
}
3196
3197/*
3198 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
3199 * must be called under priv->lock and mac access
3200 */
3201void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
3202{
3203 iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
3204}
3205
3206/*****************************************************************************
3207 *
3208 * driver and module entry point
3209 *
3210 *****************************************************************************/
3211
/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
	/* Both 4965AGN device IDs; any subsystem vendor/device accepted. */
	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
	{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
#endif /* CONFIG_IWL4965 */

	{0}	/* zero entry terminates the table */
};
MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
3222
/* PCI driver glue: binds the ID table above to the probe/remove callbacks.
 * Power-management ops come from the shared legacy core (IWL_LEGACY_PM_OPS). */
static struct pci_driver iwl4965_driver = {
	.name = DRV_NAME,
	.id_table = iwl4965_hw_card_ids,
	.probe = iwl4965_pci_probe,
	.remove = __devexit_p(iwl4965_pci_remove),
	.driver.pm = IWL_LEGACY_PM_OPS,
};
3230
3231static int __init iwl4965_init(void)
3232{
3233
3234 int ret;
3235 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3236 pr_info(DRV_COPYRIGHT "\n");
3237
3238 ret = iwl4965_rate_control_register();
3239 if (ret) {
3240 pr_err("Unable to register rate control algorithm: %d\n", ret);
3241 return ret;
3242 }
3243
3244 ret = pci_register_driver(&iwl4965_driver);
3245 if (ret) {
3246 pr_err("Unable to initialize PCI module\n");
3247 goto error_register;
3248 }
3249
3250 return ret;
3251
3252error_register:
3253 iwl4965_rate_control_unregister();
3254 return ret;
3255}
3256
/* Module exit: unregister in the reverse order of iwl4965_init(). */
static void __exit iwl4965_exit(void)
{
	pci_unregister_driver(&iwl4965_driver);
	iwl4965_rate_control_unregister();
}

module_exit(iwl4965_exit);
module_init(iwl4965_init);
3265
/* Module parameters. All are read-only after load (S_IRUGO) except the
 * debug mask, which is also writable by root at runtime via sysfs. */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
		   int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/prph.h
index 30a493003ab0..ffec4b4a248a 100644
--- a/drivers/net/wireless/iwlegacy/iwl-prph.h
+++ b/drivers/net/wireless/iwlegacy/prph.h
@@ -60,8 +60,8 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/ 61 *****************************************************************************/
62 62
63#ifndef __iwl_legacy_prph_h__ 63#ifndef __il_prph_h__
64#define __iwl_legacy_prph_h__ 64#define __il_prph_h__
65 65
66/* 66/*
67 * Registers in this file are internal, not PCI bus memory mapped. 67 * Registers in this file are internal, not PCI bus memory mapped.
@@ -91,9 +91,9 @@
91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) 91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) 92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) 93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */ 94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) 95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ 96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) 97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
98 98
99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
@@ -120,13 +120,13 @@
120 * 120 *
121 * 1) Initialization -- performs hardware calibration and sets up some 121 * 1) Initialization -- performs hardware calibration and sets up some
122 * internal data, then notifies host via "initialize alive" notification 122 * internal data, then notifies host via "initialize alive" notification
123 * (struct iwl_init_alive_resp) that it has completed all of its work. 123 * (struct il_init_alive_resp) that it has completed all of its work.
124 * After signal from host, it then loads and starts the runtime program. 124 * After signal from host, it then loads and starts the runtime program.
125 * The initialization program must be used when initially setting up the 125 * The initialization program must be used when initially setting up the
126 * NIC after loading the driver. 126 * NIC after loading the driver.
127 * 127 *
128 * 2) Runtime/Protocol -- performs all normal runtime operations. This 128 * 2) Runtime/Protocol -- performs all normal runtime operations. This
129 * notifies host via "alive" notification (struct iwl_alive_resp) that it 129 * notifies host via "alive" notification (struct il_alive_resp) that it
130 * is ready to be used. 130 * is ready to be used.
131 * 131 *
132 * When initializing the NIC, the host driver does the following procedure: 132 * When initializing the NIC, the host driver does the following procedure:
@@ -189,7 +189,7 @@
189 * procedure. 189 * procedure.
190 * 190 *
191 * This save/restore method is mostly for autonomous power management during 191 * This save/restore method is mostly for autonomous power management during
192 * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and 192 * normal operation (result of C_POWER_TBL). Platform suspend/resume and
193 * RFKILL should use complete restarts (with total re-initialization) of uCode, 193 * RFKILL should use complete restarts (with total re-initialization) of uCode,
194 * allowing total shutdown (including BSM memory). 194 * allowing total shutdown (including BSM memory).
195 * 195 *
@@ -202,19 +202,19 @@
202 */ 202 */
203 203
204/* BSM bit fields */ 204/* BSM bit fields */
205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ 205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/ 206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup */
207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ 207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
208 208
209/* BSM addresses */ 209/* BSM addresses */
210#define BSM_BASE (PRPH_BASE + 0x3400) 210#define BSM_BASE (PRPH_BASE + 0x3400)
211#define BSM_END (PRPH_BASE + 0x3800) 211#define BSM_END (PRPH_BASE + 0x3800)
212 212
213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ 213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ 214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ 215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ 216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ 217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
218 218
219/* 219/*
220 * Pointers and size regs for bootstrap load and data SRAM save/restore. 220 * Pointers and size regs for bootstrap load and data SRAM save/restore.
@@ -231,8 +231,7 @@
231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1) 231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
232 */ 232 */
233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800) 233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
234#define BSM_SRAM_SIZE (1024) /* bytes */ 234#define BSM_SRAM_SIZE (1024) /* bytes */
235
236 235
237/* 3945 Tx scheduler registers */ 236/* 3945 Tx scheduler registers */
238#define ALM_SCD_BASE (PRPH_BASE + 0x2E00) 237#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
@@ -255,7 +254,7 @@
255 * but one DMA channel may take input from several queues. 254 * but one DMA channel may take input from several queues.
256 * 255 *
257 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows 256 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c): 257 * (cf. default_queue_to_tx_fifo in 4965.c):
259 * 258 *
260 * 0 -- EDCA BK (background) frames, lowest priority 259 * 0 -- EDCA BK (background) frames, lowest priority
261 * 1 -- EDCA BE (best effort) frames, normal priority 260 * 1 -- EDCA BE (best effort) frames, normal priority
@@ -274,20 +273,20 @@
274 * The driver sets up each queue to work in one of two modes: 273 * The driver sets up each queue to work in one of two modes:
275 * 274 *
276 * 1) Scheduler-Ack, in which the scheduler automatically supports a 275 * 1) Scheduler-Ack, in which the scheduler automatically supports a
277 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue 276 * block-ack (BA) win of up to 64 TFDs. In this mode, each queue
278 * contains TFDs for a unique combination of Recipient Address (RA) 277 * contains TFDs for a unique combination of Recipient Address (RA)
279 * and Traffic Identifier (TID), that is, traffic of a given 278 * and Traffic Identifier (TID), that is, traffic of a given
280 * Quality-Of-Service (QOS) priority, destined for a single station. 279 * Quality-Of-Service (QOS) priority, destined for a single station.
281 * 280 *
282 * In scheduler-ack mode, the scheduler keeps track of the Tx status of 281 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
283 * each frame within the BA window, including whether it's been transmitted, 282 * each frame within the BA win, including whether it's been transmitted,
284 * and whether it's been acknowledged by the receiving station. The device 283 * and whether it's been acknowledged by the receiving station. The device
285 * automatically processes block-acks received from the receiving STA, 284 * automatically processes block-acks received from the receiving STA,
286 * and reschedules un-acked frames to be retransmitted (successful 285 * and reschedules un-acked frames to be retransmitted (successful
287 * Tx completion may end up being out-of-order). 286 * Tx completion may end up being out-of-order).
288 * 287 *
289 * The driver must maintain the queue's Byte Count table in host DRAM 288 * The driver must maintain the queue's Byte Count table in host DRAM
290 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode. 289 * (struct il4965_sched_queue_byte_cnt_tbl) for this mode.
291 * This mode does not support fragmentation. 290 * This mode does not support fragmentation.
292 * 291 *
293 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. 292 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
@@ -316,34 +315,34 @@
316 */ 315 */
317 316
318/** 317/**
319 * Max Tx window size is the max number of contiguous TFDs that the scheduler 318 * Max Tx win size is the max number of contiguous TFDs that the scheduler
320 * can keep track of at one time when creating block-ack chains of frames. 319 * can keep track of at one time when creating block-ack chains of frames.
321 * Note that "64" matches the number of ack bits in a block-ack packet. 320 * Note that "64" matches the number of ack bits in a block-ack packet.
322 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize 321 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
323 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values. 322 * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
324 */ 323 */
325#define SCD_WIN_SIZE 64 324#define SCD_WIN_SIZE 64
326#define SCD_FRAME_LIMIT 64 325#define SCD_FRAME_LIMIT 64
327 326
328/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */ 327/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
329#define IWL49_SCD_START_OFFSET 0xa02c00 328#define IL49_SCD_START_OFFSET 0xa02c00
330 329
331/* 330/*
332 * 4965 tells driver SRAM address for internal scheduler structs via this reg. 331 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
333 * Value is valid only after "Alive" response from uCode. 332 * Value is valid only after "Alive" response from uCode.
334 */ 333 */
335#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0) 334#define IL49_SCD_SRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x0)
336 335
337/* 336/*
338 * Driver may need to update queue-empty bits after changing queue's 337 * Driver may need to update queue-empty bits after changing queue's
339 * write and read pointers (indexes) during (re-)initialization (i.e. when 338 * write and read pointers (idxes) during (re-)initialization (i.e. when
340 * scheduler is not tracking what's happening). 339 * scheduler is not tracking what's happening).
341 * Bit fields: 340 * Bit fields:
342 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit 341 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
343 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty 342 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
344 * NOTE: This register is not used by Linux driver. 343 * NOTE: This register is not used by Linux driver.
345 */ 344 */
346#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4) 345#define IL49_SCD_EMPTY_BITS (IL49_SCD_START_OFFSET + 0x4)
347 346
348/* 347/*
349 * Physical base address of array of byte count (BC) circular buffers (CBs). 348 * Physical base address of array of byte count (BC) circular buffers (CBs).
@@ -351,11 +350,11 @@
351 * This register points to BC CB for queue 0, must be on 1024-byte boundary. 350 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
352 * Others are spaced by 1024 bytes. 351 * Others are spaced by 1024 bytes.
353 * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad. 352 * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad.
354 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff). 353 * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff).
355 * Bit fields: 354 * Bit fields:
356 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned. 355 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
357 */ 356 */
358#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10) 357#define IL49_SCD_DRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x10)
359 358
360/* 359/*
361 * Enables any/all Tx DMA/FIFO channels. 360 * Enables any/all Tx DMA/FIFO channels.
@@ -364,23 +363,23 @@
364 * Bit fields: 363 * Bit fields:
365 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7 364 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
366 */ 365 */
367#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c) 366#define IL49_SCD_TXFACT (IL49_SCD_START_OFFSET + 0x1c)
368/* 367/*
369 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue. 368 * Queue (x) Write Pointers (idxes, really!), one for each Tx queue.
370 * Initialized and updated by driver as new TFDs are added to queue. 369 * Initialized and updated by driver as new TFDs are added to queue.
371 * NOTE: If using Block Ack, index must correspond to frame's 370 * NOTE: If using Block Ack, idx must correspond to frame's
372 * Start Sequence Number; index = (SSN & 0xff) 371 * Start Sequence Number; idx = (SSN & 0xff)
373 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses? 372 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
374 */ 373 */
375#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4) 374#define IL49_SCD_QUEUE_WRPTR(x) (IL49_SCD_START_OFFSET + 0x24 + (x) * 4)
376 375
377/* 376/*
378 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue. 377 * Queue (x) Read Pointers (idxes, really!), one for each Tx queue.
379 * For FIFO mode, index indicates next frame to transmit. 378 * For FIFO mode, idx indicates next frame to transmit.
380 * For Scheduler-ACK mode, index indicates first frame in Tx window. 379 * For Scheduler-ACK mode, idx indicates first frame in Tx win.
381 * Initialized by driver, updated by scheduler. 380 * Initialized by driver, updated by scheduler.
382 */ 381 */
383#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4) 382#define IL49_SCD_QUEUE_RDPTR(x) (IL49_SCD_START_OFFSET + 0x64 + (x) * 4)
384 383
385/* 384/*
386 * Select which queues work in chain mode (1) vs. not (0). 385 * Select which queues work in chain mode (1) vs. not (0).
@@ -391,18 +390,18 @@
391 * NOTE: If driver sets up queue for chain mode, it should be also set up 390 * NOTE: If driver sets up queue for chain mode, it should be also set up
392 * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x). 391 * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x).
393 */ 392 */
394#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0) 393#define IL49_SCD_QUEUECHAIN_SEL (IL49_SCD_START_OFFSET + 0xd0)
395 394
396/* 395/*
397 * Select which queues interrupt driver when scheduler increments 396 * Select which queues interrupt driver when scheduler increments
398 * a queue's read pointer (index). 397 * a queue's read pointer (idx).
399 * Bit fields: 398 * Bit fields:
400 * 31-16: Reserved 399 * 31-16: Reserved
401 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled 400 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
402 * NOTE: This functionality is apparently a no-op; driver relies on interrupts 401 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
403 * from Rx queue to read Tx command responses and update Tx queues. 402 * from Rx queue to read Tx command responses and update Tx queues.
404 */ 403 */
405#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4) 404#define IL49_SCD_INTERRUPT_MASK (IL49_SCD_START_OFFSET + 0xe4)
406 405
407/* 406/*
408 * Queue search status registers. One for each queue. 407 * Queue search status registers. One for each queue.
@@ -414,7 +413,7 @@
414 * Driver should init to "1" for aggregation mode, or "0" otherwise. 413 * Driver should init to "1" for aggregation mode, or "0" otherwise.
415 * 7-6: Driver should init to "0" 414 * 7-6: Driver should init to "0"
416 * 5: Window Size Left; indicates whether scheduler can request 415 * 5: Window Size Left; indicates whether scheduler can request
417 * another TFD, based on window size, etc. Driver should init 416 * another TFD, based on win size, etc. Driver should init
418 * this bit to "1" for aggregation mode, or "0" for non-agg. 417 * this bit to "1" for aggregation mode, or "0" for non-agg.
419 * 4-1: Tx FIFO to use (range 0-7). 418 * 4-1: Tx FIFO to use (range 0-7).
420 * 0: Queue is active (1), not active (0). 419 * 0: Queue is active (1), not active (0).
@@ -423,18 +422,18 @@
423 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled 422 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
424 * via SCD_QUEUECHAIN_SEL. 423 * via SCD_QUEUECHAIN_SEL.
425 */ 424 */
426#define IWL49_SCD_QUEUE_STATUS_BITS(x)\ 425#define IL49_SCD_QUEUE_STATUS_BITS(x)\
427 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4) 426 (IL49_SCD_START_OFFSET + 0x104 + (x) * 4)
428 427
429/* Bit field positions */ 428/* Bit field positions */
430#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0) 429#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
431#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1) 430#define IL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
432#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5) 431#define IL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
433#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) 432#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
434 433
435/* Write masks */ 434/* Write masks */
436#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) 435#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
437#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00) 436#define IL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
438 437
439/** 438/**
440 * 4965 internal SRAM structures for scheduler, shared with driver ... 439 * 4965 internal SRAM structures for scheduler, shared with driver ...
@@ -460,7 +459,7 @@
460 * each queue's entry as follows: 459 * each queue's entry as follows:
461 * 460 *
462 * LS Dword bit fields: 461 * LS Dword bit fields:
463 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64. 462 * 0-06: Max Tx win size for Scheduler-ACK. Driver should init to 64.
464 * 463 *
465 * MS Dword bit fields: 464 * MS Dword bit fields:
466 * 16-22: Frame limit. Driver should init to 10 (0xa). 465 * 16-22: Frame limit. Driver should init to 10 (0xa).
@@ -470,14 +469,14 @@
470 * Init must be done after driver receives "Alive" response from 4965 uCode, 469 * Init must be done after driver receives "Alive" response from 4965 uCode,
471 * and when setting up queue for aggregation. 470 * and when setting up queue for aggregation.
472 */ 471 */
473#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380 472#define IL49_SCD_CONTEXT_DATA_OFFSET 0x380
474#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \ 473#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
475 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) 474 (IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
476 475
477#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) 476#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
478#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) 477#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
479#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) 478#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
480#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) 479#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
481 480
482/* 481/*
483 * Tx Status Bitmap 482 * Tx Status Bitmap
@@ -486,7 +485,7 @@
486 * "Alive" notification from uCode. Area is used only by device itself; 485 * "Alive" notification from uCode. Area is used only by device itself;
487 * no other support (besides clearing) is required from driver. 486 * no other support (besides clearing) is required from driver.
488 */ 487 */
489#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400 488#define IL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
490 489
491/* 490/*
492 * RAxTID to queue translation mapping. 491 * RAxTID to queue translation mapping.
@@ -494,7 +493,7 @@
494 * When queue is in Scheduler-ACK mode, frames placed in a that queue must be 493 * When queue is in Scheduler-ACK mode, frames placed in a that queue must be
495 * for only one combination of receiver address (RA) and traffic ID (TID), i.e. 494 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
496 * one QOS priority level destined for one station (for this wireless link, 495 * one QOS priority level destined for one station (for this wireless link,
497 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit 496 * not final destination). The SCD_TRANSLATE_TBL area provides 16 16-bit
498 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK 497 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
499 * mode, the device ignores the mapping value. 498 * mode, the device ignores the mapping value.
500 * 499 *
@@ -508,16 +507,16 @@
508 * must read a dword-aligned value from device SRAM, replace the 16-bit map 507 * must read a dword-aligned value from device SRAM, replace the 16-bit map
509 * value of interest, and write the dword value back into device SRAM. 508 * value of interest, and write the dword value back into device SRAM.
510 */ 509 */
511#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500 510#define IL49_SCD_TRANSLATE_TBL_OFFSET 0x500
512 511
513/* Find translation table dword to read/write for given queue */ 512/* Find translation table dword to read/write for given queue */
514#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ 513#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
515 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) 514 ((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
516 515
517#define IWL_SCD_TXFIFO_POS_TID (0) 516#define IL_SCD_TXFIFO_POS_TID (0)
518#define IWL_SCD_TXFIFO_POS_RA (4) 517#define IL_SCD_TXFIFO_POS_RA (4)
519#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) 518#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
520 519
521/*********************** END TX SCHEDULER *************************************/ 520/*********************** END TX SCHEDULER *************************************/
522 521
523#endif /* __iwl_legacy_prph_h__ */ 522#endif /* __il_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 57703d5209d7..ae08498dfcad 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -102,12 +102,28 @@ config IWLWIFI_DEVICE_TRACING
102 occur. 102 occur.
103endmenu 103endmenu
104 104
105config IWLWIFI_DEVICE_SVTOOL 105config IWLWIFI_DEVICE_TESTMODE
106 bool "iwlwifi device svtool support" 106 def_bool y
107 depends on IWLWIFI 107 depends on IWLWIFI
108 select NL80211_TESTMODE 108 depends on NL80211_TESTMODE
109 help 109 help
110 This option enables the svtool support for iwlwifi device through 110 This option enables the testmode support for iwlwifi device through
111 NL80211_TESTMODE. svtool is a software validation tool that runs in 111 NL80211_TESTMODE. This provide the capabilities of enable user space
112 the user space and interacts with the device in the kernel space 112 validation applications to interacts with the device through the
113 through the generic netlink message via NL80211_TESTMODE channel. 113 generic netlink message via NL80211_TESTMODE channel.
114
115config IWLWIFI_P2P
116 bool "iwlwifi experimental P2P support"
117 depends on IWLWIFI
118 help
119 This option enables experimental P2P support for some devices
120 based on microcode support. Since P2P support is still under
121 development, this option may even enable it for some devices
122 now that turn out to not support it in the future due to
123 microcode restrictions.
124
125 To determine if your microcode supports the experimental P2P
126 offered by this option, check if the driver advertises AP
127 support when it is loaded.
128
129 Say Y only if you want to experiment with P2P.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index a7ab280994c8..9dc84a7354db 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,7 +1,7 @@
1# WIFI 1# WIFI
2obj-$(CONFIG_IWLWIFI) += iwlwifi.o 2obj-$(CONFIG_IWLWIFI) += iwlwifi.o
3iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o 3iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
4iwlwifi-objs += iwl-agn-ucode.o iwl-agn-tx.o 4iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o
5iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o 5iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
6iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o 6iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
7 7
@@ -18,7 +18,7 @@ iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
18 18
19iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 19iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
20iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 20iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
21iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o 21iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
22 22
23CFLAGS_iwl-devtrace.o := -I$(src) 23CFLAGS_iwl-devtrace.o := -I$(src)
24 24
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index e12b48c2cff6..8d3bad7ea5d3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -147,16 +147,7 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
147 iwl1000_set_ct_threshold(priv); 147 iwl1000_set_ct_threshold(priv);
148 148
149 /* Set initial sensitivity parameters */ 149 /* Set initial sensitivity parameters */
150 /* Set initial calibration set */
151 hw_params(priv).sens = &iwl1000_sensitivity; 150 hw_params(priv).sens = &iwl1000_sensitivity;
152 hw_params(priv).calib_init_cfg =
153 BIT(IWL_CALIB_XTAL) |
154 BIT(IWL_CALIB_LO) |
155 BIT(IWL_CALIB_TX_IQ) |
156 BIT(IWL_CALIB_TX_IQ_PERD) |
157 BIT(IWL_CALIB_BASE_BAND);
158 if (priv->cfg->need_dc_calib)
159 hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
160 151
161 return 0; 152 return 0;
162} 153}
@@ -191,6 +182,7 @@ static struct iwl_base_params iwl1000_base_params = {
191 .chain_noise_scale = 1000, 182 .chain_noise_scale = 1000,
192 .wd_timeout = IWL_DEF_WD_TIMEOUT, 183 .wd_timeout = IWL_DEF_WD_TIMEOUT,
193 .max_event_log_size = 128, 184 .max_event_log_size = 128,
185 .wd_disable = true,
194}; 186};
195static struct iwl_ht_params iwl1000_ht_params = { 187static struct iwl_ht_params iwl1000_ht_params = {
196 .ht_greenfield_support = true, 188 .ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index b3193571ed07..0c4688d95b65 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -143,17 +143,7 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
143 iwl2000_set_ct_threshold(priv); 143 iwl2000_set_ct_threshold(priv);
144 144
145 /* Set initial sensitivity parameters */ 145 /* Set initial sensitivity parameters */
146 /* Set initial calibration set */
147 hw_params(priv).sens = &iwl2000_sensitivity; 146 hw_params(priv).sens = &iwl2000_sensitivity;
148 hw_params(priv).calib_init_cfg =
149 BIT(IWL_CALIB_XTAL) |
150 BIT(IWL_CALIB_LO) |
151 BIT(IWL_CALIB_TX_IQ) |
152 BIT(IWL_CALIB_BASE_BAND);
153 if (priv->cfg->need_dc_calib)
154 hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
155 if (priv->cfg->need_temp_offset_calib)
156 hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
157 147
158 return 0; 148 return 0;
159} 149}
@@ -258,7 +248,6 @@ static struct iwl_bt_params iwl2030_bt_params = {
258 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 248 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
259 .lib = &iwl2000_lib, \ 249 .lib = &iwl2000_lib, \
260 .base_params = &iwl2000_base_params, \ 250 .base_params = &iwl2000_base_params, \
261 .need_dc_calib = true, \
262 .need_temp_offset_calib = true, \ 251 .need_temp_offset_calib = true, \
263 .temp_offset_v2 = true, \ 252 .temp_offset_v2 = true, \
264 .led_mode = IWL_LED_RF_STATE, \ 253 .led_mode = IWL_LED_RF_STATE, \
@@ -286,7 +275,6 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
286 .lib = &iwl2030_lib, \ 275 .lib = &iwl2030_lib, \
287 .base_params = &iwl2030_base_params, \ 276 .base_params = &iwl2030_base_params, \
288 .bt_params = &iwl2030_bt_params, \ 277 .bt_params = &iwl2030_bt_params, \
289 .need_dc_calib = true, \
290 .need_temp_offset_calib = true, \ 278 .need_temp_offset_calib = true, \
291 .temp_offset_v2 = true, \ 279 .temp_offset_v2 = true, \
292 .led_mode = IWL_LED_RF_STATE, \ 280 .led_mode = IWL_LED_RF_STATE, \
@@ -308,7 +296,6 @@ struct iwl_cfg iwl2030_2bgn_cfg = {
308 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 296 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
309 .lib = &iwl2000_lib, \ 297 .lib = &iwl2000_lib, \
310 .base_params = &iwl2000_base_params, \ 298 .base_params = &iwl2000_base_params, \
311 .need_dc_calib = true, \
312 .need_temp_offset_calib = true, \ 299 .need_temp_offset_calib = true, \
313 .temp_offset_v2 = true, \ 300 .temp_offset_v2 = true, \
314 .led_mode = IWL_LED_RF_STATE, \ 301 .led_mode = IWL_LED_RF_STATE, \
@@ -338,7 +325,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
338 .lib = &iwl2030_lib, \ 325 .lib = &iwl2030_lib, \
339 .base_params = &iwl2030_base_params, \ 326 .base_params = &iwl2030_base_params, \
340 .bt_params = &iwl2030_bt_params, \ 327 .bt_params = &iwl2030_bt_params, \
341 .need_dc_calib = true, \
342 .need_temp_offset_calib = true, \ 328 .need_temp_offset_calib = true, \
343 .temp_offset_v2 = true, \ 329 .temp_offset_v2 = true, \
344 .led_mode = IWL_LED_RF_STATE, \ 330 .led_mode = IWL_LED_RF_STATE, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index c511c98a89a8..6706d7c10bd8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -134,10 +134,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
134 134
135#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) 135#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
136 136
137static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) 137static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd)
138{ 138{
139 u16 temperature, voltage; 139 u16 temperature, voltage;
140 __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv, 140 __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd,
141 EEPROM_KELVIN_TEMPERATURE); 141 EEPROM_KELVIN_TEMPERATURE);
142 142
143 temperature = le16_to_cpu(temp_calib[0]); 143 temperature = le16_to_cpu(temp_calib[0]);
@@ -151,7 +151,7 @@ static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
151{ 151{
152 const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; 152 const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
153 s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) - 153 s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
154 iwl_temp_calib_to_offset(priv); 154 iwl_temp_calib_to_offset(priv->shrd);
155 155
156 hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef; 156 hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
157} 157}
@@ -186,14 +186,7 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
186 iwl5000_set_ct_threshold(priv); 186 iwl5000_set_ct_threshold(priv);
187 187
188 /* Set initial sensitivity parameters */ 188 /* Set initial sensitivity parameters */
189 /* Set initial calibration set */
190 hw_params(priv).sens = &iwl5000_sensitivity; 189 hw_params(priv).sens = &iwl5000_sensitivity;
191 hw_params(priv).calib_init_cfg =
192 BIT(IWL_CALIB_XTAL) |
193 BIT(IWL_CALIB_LO) |
194 BIT(IWL_CALIB_TX_IQ) |
195 BIT(IWL_CALIB_TX_IQ_PERD) |
196 BIT(IWL_CALIB_BASE_BAND);
197 190
198 return 0; 191 return 0;
199} 192}
@@ -222,14 +215,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
222 iwl5150_set_ct_threshold(priv); 215 iwl5150_set_ct_threshold(priv);
223 216
224 /* Set initial sensitivity parameters */ 217 /* Set initial sensitivity parameters */
225 /* Set initial calibration set */
226 hw_params(priv).sens = &iwl5150_sensitivity; 218 hw_params(priv).sens = &iwl5150_sensitivity;
227 hw_params(priv).calib_init_cfg =
228 BIT(IWL_CALIB_LO) |
229 BIT(IWL_CALIB_TX_IQ) |
230 BIT(IWL_CALIB_BASE_BAND);
231 if (priv->cfg->need_dc_calib)
232 hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
233 219
234 return 0; 220 return 0;
235} 221}
@@ -237,7 +223,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
237static void iwl5150_temperature(struct iwl_priv *priv) 223static void iwl5150_temperature(struct iwl_priv *priv)
238{ 224{
239 u32 vt = 0; 225 u32 vt = 0;
240 s32 offset = iwl_temp_calib_to_offset(priv); 226 s32 offset = iwl_temp_calib_to_offset(priv->shrd);
241 227
242 vt = le32_to_cpu(priv->statistics.common.temperature); 228 vt = le32_to_cpu(priv->statistics.common.temperature);
243 vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset; 229 vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
@@ -364,6 +350,7 @@ static struct iwl_base_params iwl5000_base_params = {
364 .wd_timeout = IWL_LONG_WD_TIMEOUT, 350 .wd_timeout = IWL_LONG_WD_TIMEOUT,
365 .max_event_log_size = 512, 351 .max_event_log_size = 512,
366 .no_idle_support = true, 352 .no_idle_support = true,
353 .wd_disable = true,
367}; 354};
368static struct iwl_ht_params iwl5000_ht_params = { 355static struct iwl_ht_params iwl5000_ht_params = {
369 .ht_greenfield_support = true, 356 .ht_greenfield_support = true,
@@ -433,7 +420,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
433 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ 420 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
434 .lib = &iwl5150_lib, \ 421 .lib = &iwl5150_lib, \
435 .base_params = &iwl5000_base_params, \ 422 .base_params = &iwl5000_base_params, \
436 .need_dc_calib = true, \ 423 .no_xtal_calib = true, \
437 .led_mode = IWL_LED_BLINK, \ 424 .led_mode = IWL_LED_BLINK, \
438 .internal_wimax_coex = true 425 .internal_wimax_coex = true
439 426
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index ee3363fdf309..3e277b6774f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -46,11 +46,12 @@
46#include "iwl-cfg.h" 46#include "iwl-cfg.h"
47 47
48/* Highest firmware API version supported */ 48/* Highest firmware API version supported */
49#define IWL6000_UCODE_API_MAX 4 49#define IWL6000_UCODE_API_MAX 6
50#define IWL6050_UCODE_API_MAX 5 50#define IWL6050_UCODE_API_MAX 5
51#define IWL6000G2_UCODE_API_MAX 6 51#define IWL6000G2_UCODE_API_MAX 6
52 52
53/* Oldest version we won't warn about */ 53/* Oldest version we won't warn about */
54#define IWL6000_UCODE_API_OK 4
54#define IWL6000G2_UCODE_API_OK 5 55#define IWL6000G2_UCODE_API_OK 5
55 56
56/* Lowest firmware API version supported */ 57/* Lowest firmware API version supported */
@@ -80,7 +81,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
80static void iwl6050_additional_nic_config(struct iwl_priv *priv) 81static void iwl6050_additional_nic_config(struct iwl_priv *priv)
81{ 82{
82 /* Indicate calibration version to uCode. */ 83 /* Indicate calibration version to uCode. */
83 if (iwlagn_eeprom_calib_version(priv) >= 6) 84 if (iwl_eeprom_calib_version(priv->shrd) >= 6)
84 iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG, 85 iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
85 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 86 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
86} 87}
@@ -88,7 +89,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
88static void iwl6150_additional_nic_config(struct iwl_priv *priv) 89static void iwl6150_additional_nic_config(struct iwl_priv *priv)
89{ 90{
90 /* Indicate calibration version to uCode. */ 91 /* Indicate calibration version to uCode. */
91 if (iwlagn_eeprom_calib_version(priv) >= 6) 92 if (iwl_eeprom_calib_version(priv->shrd) >= 6)
92 iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG, 93 iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
93 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 94 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
94 iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG, 95 iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
@@ -164,17 +165,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
164 iwl6000_set_ct_threshold(priv); 165 iwl6000_set_ct_threshold(priv);
165 166
166 /* Set initial sensitivity parameters */ 167 /* Set initial sensitivity parameters */
167 /* Set initial calibration set */
168 hw_params(priv).sens = &iwl6000_sensitivity; 168 hw_params(priv).sens = &iwl6000_sensitivity;
169 hw_params(priv).calib_init_cfg =
170 BIT(IWL_CALIB_XTAL) |
171 BIT(IWL_CALIB_LO) |
172 BIT(IWL_CALIB_TX_IQ) |
173 BIT(IWL_CALIB_BASE_BAND);
174 if (priv->cfg->need_dc_calib)
175 hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
176 if (priv->cfg->need_temp_offset_calib)
177 hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
178 169
179 return 0; 170 return 0;
180} 171}
@@ -364,7 +355,6 @@ static struct iwl_bt_params iwl6000_bt_params = {
364 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ 355 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
365 .lib = &iwl6000_lib, \ 356 .lib = &iwl6000_lib, \
366 .base_params = &iwl6000_g2_base_params, \ 357 .base_params = &iwl6000_g2_base_params, \
367 .need_dc_calib = true, \
368 .need_temp_offset_calib = true, \ 358 .need_temp_offset_calib = true, \
369 .led_mode = IWL_LED_RF_STATE 359 .led_mode = IWL_LED_RF_STATE
370 360
@@ -406,7 +396,6 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
406 .lib = &iwl6030_lib, \ 396 .lib = &iwl6030_lib, \
407 .base_params = &iwl6000_g2_base_params, \ 397 .base_params = &iwl6000_g2_base_params, \
408 .bt_params = &iwl6000_bt_params, \ 398 .bt_params = &iwl6000_bt_params, \
409 .need_dc_calib = true, \
410 .need_temp_offset_calib = true, \ 399 .need_temp_offset_calib = true, \
411 .led_mode = IWL_LED_RF_STATE, \ 400 .led_mode = IWL_LED_RF_STATE, \
412 .adv_pm = true \ 401 .adv_pm = true \
@@ -469,6 +458,7 @@ struct iwl_cfg iwl130_bg_cfg = {
469#define IWL_DEVICE_6000i \ 458#define IWL_DEVICE_6000i \
470 .fw_name_pre = IWL6000_FW_PRE, \ 459 .fw_name_pre = IWL6000_FW_PRE, \
471 .ucode_api_max = IWL6000_UCODE_API_MAX, \ 460 .ucode_api_max = IWL6000_UCODE_API_MAX, \
461 .ucode_api_ok = IWL6000_UCODE_API_OK, \
472 .ucode_api_min = IWL6000_UCODE_API_MIN, \ 462 .ucode_api_min = IWL6000_UCODE_API_MIN, \
473 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \ 463 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
474 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ 464 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
@@ -506,7 +496,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
506 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ 496 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
507 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ 497 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
508 .base_params = &iwl6050_base_params, \ 498 .base_params = &iwl6050_base_params, \
509 .need_dc_calib = true, \
510 .led_mode = IWL_LED_BLINK, \ 499 .led_mode = IWL_LED_BLINK, \
511 .internal_wimax_coex = true 500 .internal_wimax_coex = true
512 501
@@ -530,7 +519,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
530 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \ 519 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
531 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ 520 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
532 .base_params = &iwl6050_base_params, \ 521 .base_params = &iwl6050_base_params, \
533 .need_dc_calib = true, \
534 .led_mode = IWL_LED_BLINK, \ 522 .led_mode = IWL_LED_BLINK, \
535 .internal_wimax_coex = true 523 .internal_wimax_coex = true
536 524
@@ -549,17 +537,17 @@ struct iwl_cfg iwl6000_3agn_cfg = {
549 .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN", 537 .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
550 .fw_name_pre = IWL6000_FW_PRE, 538 .fw_name_pre = IWL6000_FW_PRE,
551 .ucode_api_max = IWL6000_UCODE_API_MAX, 539 .ucode_api_max = IWL6000_UCODE_API_MAX,
540 .ucode_api_ok = IWL6000_UCODE_API_OK,
552 .ucode_api_min = IWL6000_UCODE_API_MIN, 541 .ucode_api_min = IWL6000_UCODE_API_MIN,
553 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 542 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
554 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 543 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
555 .lib = &iwl6000_lib, 544 .lib = &iwl6000_lib,
556 .base_params = &iwl6000_base_params, 545 .base_params = &iwl6000_base_params,
557 .ht_params = &iwl6000_ht_params, 546 .ht_params = &iwl6000_ht_params,
558 .need_dc_calib = true,
559 .led_mode = IWL_LED_BLINK, 547 .led_mode = IWL_LED_BLINK,
560}; 548};
561 549
562MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 550MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
563MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); 551MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
564MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 552MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
565MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 553MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 03bac48558b2..16971a020297 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -82,56 +82,64 @@ struct statistics_general_data {
82 u32 beacon_energy_c; 82 u32 beacon_energy_c;
83}; 83};
84 84
85int iwl_send_calib_results(struct iwl_priv *priv) 85int iwl_send_calib_results(struct iwl_trans *trans)
86{ 86{
87 int ret = 0;
88 int i = 0;
89
90 struct iwl_host_cmd hcmd = { 87 struct iwl_host_cmd hcmd = {
91 .id = REPLY_PHY_CALIBRATION_CMD, 88 .id = REPLY_PHY_CALIBRATION_CMD,
92 .flags = CMD_SYNC, 89 .flags = CMD_SYNC,
93 }; 90 };
94 91 struct iwl_calib_result *res;
95 for (i = 0; i < IWL_CALIB_MAX; i++) { 92
96 if ((BIT(i) & hw_params(priv).calib_init_cfg) && 93 list_for_each_entry(res, &trans->calib_results, list) {
97 priv->calib_results[i].buf) { 94 int ret;
98 hcmd.len[0] = priv->calib_results[i].buf_len; 95
99 hcmd.data[0] = priv->calib_results[i].buf; 96 hcmd.len[0] = res->cmd_len;
100 hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; 97 hcmd.data[0] = &res->hdr;
101 ret = iwl_trans_send_cmd(trans(priv), &hcmd); 98 hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
102 if (ret) { 99 ret = iwl_trans_send_cmd(trans, &hcmd);
103 IWL_ERR(priv, "Error %d iteration %d\n", 100 if (ret) {
104 ret, i); 101 IWL_ERR(trans, "Error %d on calib cmd %d\n",
105 break; 102 ret, res->hdr.op_code);
106 } 103 return ret;
107 } 104 }
108 } 105 }
109 106
110 return ret; 107 return 0;
111} 108}
112 109
113int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len) 110int iwl_calib_set(struct iwl_trans *trans,
111 const struct iwl_calib_hdr *cmd, int len)
114{ 112{
115 if (res->buf_len != len) { 113 struct iwl_calib_result *res, *tmp;
116 kfree(res->buf); 114
117 res->buf = kzalloc(len, GFP_ATOMIC); 115 res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
118 } 116 GFP_ATOMIC);
119 if (unlikely(res->buf == NULL)) 117 if (!res)
120 return -ENOMEM; 118 return -ENOMEM;
119 memcpy(&res->hdr, cmd, len);
120 res->cmd_len = len;
121
122 list_for_each_entry(tmp, &trans->calib_results, list) {
123 if (tmp->hdr.op_code == res->hdr.op_code) {
124 list_replace(&tmp->list, &res->list);
125 kfree(tmp);
126 return 0;
127 }
128 }
129
130 /* wasn't in list already */
131 list_add_tail(&res->list, &trans->calib_results);
121 132
122 res->buf_len = len;
123 memcpy(res->buf, buf, len);
124 return 0; 133 return 0;
125} 134}
126 135
127void iwl_calib_free_results(struct iwl_priv *priv) 136void iwl_calib_free_results(struct iwl_trans *trans)
128{ 137{
129 int i; 138 struct iwl_calib_result *res, *tmp;
130 139
131 for (i = 0; i < IWL_CALIB_MAX; i++) { 140 list_for_each_entry_safe(res, tmp, &trans->calib_results, list) {
132 kfree(priv->calib_results[i].buf); 141 list_del(&res->list);
133 priv->calib_results[i].buf = NULL; 142 kfree(res);
134 priv->calib_results[i].buf_len = 0;
135 } 143 }
136} 144}
137 145
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index a869fc9205d2..10275ce92bde 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -72,8 +72,4 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv);
72void iwl_init_sensitivity(struct iwl_priv *priv); 72void iwl_init_sensitivity(struct iwl_priv *priv);
73void iwl_reset_run_time_calib(struct iwl_priv *priv); 73void iwl_reset_run_time_calib(struct iwl_priv *priv);
74 74
75int iwl_send_calib_results(struct iwl_priv *priv);
76int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
77void iwl_calib_free_results(struct iwl_priv *priv);
78
79#endif /* __iwl_calib_h__ */ 75#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 0bc962217351..057f95233567 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -92,11 +92,11 @@ void iwlagn_temperature(struct iwl_priv *priv)
92 iwl_tt_handler(priv); 92 iwl_tt_handler(priv);
93} 93}
94 94
95u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv) 95u16 iwl_eeprom_calib_version(struct iwl_shared *shrd)
96{ 96{
97 struct iwl_eeprom_calib_hdr *hdr; 97 struct iwl_eeprom_calib_hdr *hdr;
98 98
99 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, 99 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd,
100 EEPROM_CALIB_ALL); 100 EEPROM_CALIB_ALL);
101 return hdr->version; 101 return hdr->version;
102 102
@@ -105,7 +105,7 @@ u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
105/* 105/*
106 * EEPROM 106 * EEPROM
107 */ 107 */
108static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address) 108static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address)
109{ 109{
110 u16 offset = 0; 110 u16 offset = 0;
111 111
@@ -114,31 +114,31 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
114 114
115 switch (address & INDIRECT_TYPE_MSK) { 115 switch (address & INDIRECT_TYPE_MSK) {
116 case INDIRECT_HOST: 116 case INDIRECT_HOST:
117 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST); 117 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST);
118 break; 118 break;
119 case INDIRECT_GENERAL: 119 case INDIRECT_GENERAL:
120 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL); 120 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL);
121 break; 121 break;
122 case INDIRECT_REGULATORY: 122 case INDIRECT_REGULATORY:
123 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY); 123 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY);
124 break; 124 break;
125 case INDIRECT_TXP_LIMIT: 125 case INDIRECT_TXP_LIMIT:
126 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT); 126 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT);
127 break; 127 break;
128 case INDIRECT_TXP_LIMIT_SIZE: 128 case INDIRECT_TXP_LIMIT_SIZE:
129 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE); 129 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE);
130 break; 130 break;
131 case INDIRECT_CALIBRATION: 131 case INDIRECT_CALIBRATION:
132 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION); 132 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION);
133 break; 133 break;
134 case INDIRECT_PROCESS_ADJST: 134 case INDIRECT_PROCESS_ADJST:
135 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST); 135 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST);
136 break; 136 break;
137 case INDIRECT_OTHERS: 137 case INDIRECT_OTHERS:
138 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS); 138 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS);
139 break; 139 break;
140 default: 140 default:
141 IWL_ERR(priv, "illegal indirect type: 0x%X\n", 141 IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n",
142 address & INDIRECT_TYPE_MSK); 142 address & INDIRECT_TYPE_MSK);
143 break; 143 break;
144 } 144 }
@@ -147,11 +147,11 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
147 return (address & ADDRESS_MSK) + (offset << 1); 147 return (address & ADDRESS_MSK) + (offset << 1);
148} 148}
149 149
150const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) 150const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset)
151{ 151{
152 u32 address = eeprom_indirect_address(priv, offset); 152 u32 address = eeprom_indirect_address(shrd, offset);
153 BUG_ON(address >= priv->cfg->base_params->eeprom_size); 153 BUG_ON(address >= shrd->priv->cfg->base_params->eeprom_size);
154 return &priv->eeprom[address]; 154 return &shrd->eeprom[address];
155} 155}
156 156
157struct iwl_mod_params iwlagn_mod_params = { 157struct iwl_mod_params iwlagn_mod_params = {
@@ -934,57 +934,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
934 return ant; 934 return ant;
935} 935}
936 936
937/* notification wait support */
938void iwlagn_init_notification_wait(struct iwl_priv *priv,
939 struct iwl_notification_wait *wait_entry,
940 u8 cmd,
941 void (*fn)(struct iwl_priv *priv,
942 struct iwl_rx_packet *pkt,
943 void *data),
944 void *fn_data)
945{
946 wait_entry->fn = fn;
947 wait_entry->fn_data = fn_data;
948 wait_entry->cmd = cmd;
949 wait_entry->triggered = false;
950 wait_entry->aborted = false;
951
952 spin_lock_bh(&priv->notif_wait_lock);
953 list_add(&wait_entry->list, &priv->notif_waits);
954 spin_unlock_bh(&priv->notif_wait_lock);
955}
956
957int iwlagn_wait_notification(struct iwl_priv *priv,
958 struct iwl_notification_wait *wait_entry,
959 unsigned long timeout)
960{
961 int ret;
962
963 ret = wait_event_timeout(priv->notif_waitq,
964 wait_entry->triggered || wait_entry->aborted,
965 timeout);
966
967 spin_lock_bh(&priv->notif_wait_lock);
968 list_del(&wait_entry->list);
969 spin_unlock_bh(&priv->notif_wait_lock);
970
971 if (wait_entry->aborted)
972 return -EIO;
973
974 /* return value is always >= 0 */
975 if (ret <= 0)
976 return -ETIMEDOUT;
977 return 0;
978}
979
980void iwlagn_remove_notification(struct iwl_priv *priv,
981 struct iwl_notification_wait *wait_entry)
982{
983 spin_lock_bh(&priv->notif_wait_lock);
984 list_del(&wait_entry->list);
985 spin_unlock_bh(&priv->notif_wait_lock);
986}
987
988#ifdef CONFIG_PM_SLEEP 937#ifdef CONFIG_PM_SLEEP
989static void iwlagn_convert_p1k(u16 *p1k, __le16 *out) 938static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
990{ 939{
@@ -1208,7 +1157,7 @@ int iwlagn_suspend(struct iwl_priv *priv,
1208 * For QoS counters, we store the one to use next, so subtract 0x10 1157 * For QoS counters, we store the one to use next, so subtract 0x10
1209 * since the uCode will add 0x10 before using the value. 1158 * since the uCode will add 0x10 before using the value.
1210 */ 1159 */
1211 for (i = 0; i < 8; i++) { 1160 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1212 seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number; 1161 seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
1213 seq -= 0x10; 1162 seq -= 0x10;
1214 wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq); 1163 wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 359c47a4fcea..a23835a7797a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -298,7 +298,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
298 } else 298 } else
299 return IWL_MAX_TID_COUNT; 299 return IWL_MAX_TID_COUNT;
300 300
301 if (unlikely(tid >= TID_MAX_LOAD_COUNT)) 301 if (unlikely(tid >= IWL_MAX_TID_COUNT))
302 return IWL_MAX_TID_COUNT; 302 return IWL_MAX_TID_COUNT;
303 303
304 tl = &lq_data->load[tid]; 304 tl = &lq_data->load[tid];
@@ -352,7 +352,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
352 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 352 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
353 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ 353 lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
354 354
355#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL 355#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
356 /* testmode has higher priority to overwirte the fixed rate */ 356 /* testmode has higher priority to overwirte the fixed rate */
357 if (priv->tm_fixed_rate) 357 if (priv->tm_fixed_rate)
358 lq_sta->dbg_fixed_rate = priv->tm_fixed_rate; 358 lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
@@ -379,7 +379,7 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
379 s32 index; 379 s32 index;
380 struct iwl_traffic_load *tl = NULL; 380 struct iwl_traffic_load *tl = NULL;
381 381
382 if (tid >= TID_MAX_LOAD_COUNT) 382 if (tid >= IWL_MAX_TID_COUNT)
383 return 0; 383 return 0;
384 384
385 tl = &(lq_data->load[tid]); 385 tl = &(lq_data->load[tid]);
@@ -444,11 +444,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
444 struct iwl_lq_sta *lq_data, 444 struct iwl_lq_sta *lq_data,
445 struct ieee80211_sta *sta) 445 struct ieee80211_sta *sta)
446{ 446{
447 if (tid < TID_MAX_LOAD_COUNT) 447 if (tid < IWL_MAX_TID_COUNT)
448 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); 448 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
449 else 449 else
450 IWL_ERR(priv, "tid exceeds max load count: %d/%d\n", 450 IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
451 tid, TID_MAX_LOAD_COUNT); 451 tid, IWL_MAX_TID_COUNT);
452} 452}
453 453
454static inline int get_num_of_ant_from_rate(u32 rate_n_flags) 454static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
@@ -1081,7 +1081,7 @@ done:
1081 if (sta && sta->supp_rates[sband->band]) 1081 if (sta && sta->supp_rates[sband->band])
1082 rs_rate_scale_perform(priv, skb, sta, lq_sta); 1082 rs_rate_scale_perform(priv, skb, sta, lq_sta);
1083 1083
1084#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL) 1084#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
1085 if ((priv->tm_fixed_rate) && 1085 if ((priv->tm_fixed_rate) &&
1086 (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate)) 1086 (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
1087 rs_program_fix_rate(priv, lq_sta); 1087 rs_program_fix_rate(priv, lq_sta);
@@ -2904,7 +2904,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2904 if (sband->band == IEEE80211_BAND_5GHZ) 2904 if (sband->band == IEEE80211_BAND_5GHZ)
2905 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 2905 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2906 lq_sta->is_agg = 0; 2906 lq_sta->is_agg = 0;
2907#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL 2907#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
2908 priv->tm_fixed_rate = 0; 2908 priv->tm_fixed_rate = 0;
2909#endif 2909#endif
2910#ifdef CONFIG_MAC80211_DEBUGFS 2910#ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index f4f6deb829ae..6675b3c816d9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -281,7 +281,6 @@ enum {
281#define TID_QUEUE_CELL_SPACING 50 /*mS */ 281#define TID_QUEUE_CELL_SPACING 50 /*mS */
282#define TID_QUEUE_MAX_SIZE 20 282#define TID_QUEUE_MAX_SIZE 20
283#define TID_ROUND_VALUE 5 /* mS */ 283#define TID_ROUND_VALUE 5 /* mS */
284#define TID_MAX_LOAD_COUNT 8
285 284
286#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) 285#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
287#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y)) 286#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
@@ -402,7 +401,7 @@ struct iwl_lq_sta {
402 401
403 struct iwl_link_quality_cmd lq; 402 struct iwl_link_quality_cmd lq;
404 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ 403 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
405 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT]; 404 struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
406 u8 tx_agg_tid_en; 405 u8 tx_agg_tid_en;
407#ifdef CONFIG_MAC80211_DEBUGFS 406#ifdef CONFIG_MAC80211_DEBUGFS
408 struct dentry *rs_sta_dbgfs_scale_table_file; 407 struct dentry *rs_sta_dbgfs_scale_table_file;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index fdb4c3786114..9001c23f27bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -117,6 +117,7 @@ const char *get_cmd_string(u8 cmd)
117 IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS); 117 IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
118 IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL); 118 IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
119 IWL_CMD(REPLY_WOWLAN_GET_STATUS); 119 IWL_CMD(REPLY_WOWLAN_GET_STATUS);
120 IWL_CMD(REPLY_D3_CONFIG);
120 default: 121 default:
121 return "UNKNOWN"; 122 return "UNKNOWN";
122 123
@@ -1130,9 +1131,9 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
1130 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; 1131 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
1131 1132
1132 /* set up notification wait support */ 1133 /* set up notification wait support */
1133 spin_lock_init(&priv->notif_wait_lock); 1134 spin_lock_init(&priv->shrd->notif_wait_lock);
1134 INIT_LIST_HEAD(&priv->notif_waits); 1135 INIT_LIST_HEAD(&priv->shrd->notif_waits);
1135 init_waitqueue_head(&priv->notif_waitq); 1136 init_waitqueue_head(&priv->shrd->notif_waitq);
1136 1137
1137 /* Set up BT Rx handlers */ 1138 /* Set up BT Rx handlers */
1138 if (priv->cfg->lib->bt_rx_handler_setup) 1139 if (priv->cfg->lib->bt_rx_handler_setup)
@@ -1151,11 +1152,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
1151 * even if the RX handler consumes the RXB we have 1152 * even if the RX handler consumes the RXB we have
1152 * access to it in the notification wait entry. 1153 * access to it in the notification wait entry.
1153 */ 1154 */
1154 if (!list_empty(&priv->notif_waits)) { 1155 if (!list_empty(&priv->shrd->notif_waits)) {
1155 struct iwl_notification_wait *w; 1156 struct iwl_notification_wait *w;
1156 1157
1157 spin_lock(&priv->notif_wait_lock); 1158 spin_lock(&priv->shrd->notif_wait_lock);
1158 list_for_each_entry(w, &priv->notif_waits, list) { 1159 list_for_each_entry(w, &priv->shrd->notif_waits, list) {
1159 if (w->cmd != pkt->hdr.cmd) 1160 if (w->cmd != pkt->hdr.cmd)
1160 continue; 1161 continue;
1161 IWL_DEBUG_RX(priv, 1162 IWL_DEBUG_RX(priv,
@@ -1164,11 +1165,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
1164 pkt->hdr.cmd); 1165 pkt->hdr.cmd);
1165 w->triggered = true; 1166 w->triggered = true;
1166 if (w->fn) 1167 if (w->fn)
1167 w->fn(priv, pkt, w->fn_data); 1168 w->fn(trans(priv), pkt, w->fn_data);
1168 } 1169 }
1169 spin_unlock(&priv->notif_wait_lock); 1170 spin_unlock(&priv->shrd->notif_wait_lock);
1170 1171
1171 wake_up_all(&priv->notif_waitq); 1172 wake_up_all(&priv->shrd->notif_waitq);
1172 } 1173 }
1173 1174
1174 if (priv->pre_rx_handler) 1175 if (priv->pre_rx_handler)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 8de97f5a1825..d21f535a3b4f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -60,7 +60,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
60 u8 old_dev_type = send->dev_type; 60 u8 old_dev_type = send->dev_type;
61 int ret; 61 int ret;
62 62
63 iwlagn_init_notification_wait(priv, &disable_wait, 63 iwl_init_notification_wait(priv->shrd, &disable_wait,
64 REPLY_WIPAN_DEACTIVATION_COMPLETE, 64 REPLY_WIPAN_DEACTIVATION_COMPLETE,
65 NULL, NULL); 65 NULL, NULL);
66 66
@@ -74,9 +74,9 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
74 74
75 if (ret) { 75 if (ret) {
76 IWL_ERR(priv, "Error disabling PAN (%d)\n", ret); 76 IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
77 iwlagn_remove_notification(priv, &disable_wait); 77 iwl_remove_notification(priv->shrd, &disable_wait);
78 } else { 78 } else {
79 ret = iwlagn_wait_notification(priv, &disable_wait, HZ); 79 ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ);
80 if (ret) 80 if (ret)
81 IWL_ERR(priv, "Timed out waiting for PAN disable\n"); 81 IWL_ERR(priv, "Timed out waiting for PAN disable\n");
82 } 82 }
@@ -529,6 +529,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
529 return 0; 529 return 0;
530} 530}
531 531
532void iwlagn_config_ht40(struct ieee80211_conf *conf,
533 struct iwl_rxon_context *ctx)
534{
535 if (conf_is_ht40_minus(conf)) {
536 ctx->ht.extension_chan_offset =
537 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
538 ctx->ht.is_40mhz = true;
539 } else if (conf_is_ht40_plus(conf)) {
540 ctx->ht.extension_chan_offset =
541 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
542 ctx->ht.is_40mhz = true;
543 } else {
544 ctx->ht.extension_chan_offset =
545 IEEE80211_HT_PARAM_CHA_SEC_NONE;
546 ctx->ht.is_40mhz = false;
547 }
548}
549
532int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) 550int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
533{ 551{
534 struct iwl_priv *priv = hw->priv; 552 struct iwl_priv *priv = hw->priv;
@@ -590,19 +608,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
590 ctx->ht.enabled = conf_is_ht(conf); 608 ctx->ht.enabled = conf_is_ht(conf);
591 609
592 if (ctx->ht.enabled) { 610 if (ctx->ht.enabled) {
593 if (conf_is_ht40_minus(conf)) { 611 /* if HT40 is used, it should not change
594 ctx->ht.extension_chan_offset = 612 * after associated except channel switch */
595 IEEE80211_HT_PARAM_CHA_SEC_BELOW; 613 if (!ctx->ht.is_40mhz ||
596 ctx->ht.is_40mhz = true; 614 !iwl_is_associated_ctx(ctx))
597 } else if (conf_is_ht40_plus(conf)) { 615 iwlagn_config_ht40(conf, ctx);
598 ctx->ht.extension_chan_offset =
599 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
600 ctx->ht.is_40mhz = true;
601 } else {
602 ctx->ht.extension_chan_offset =
603 IEEE80211_HT_PARAM_CHA_SEC_NONE;
604 ctx->ht.is_40mhz = false;
605 }
606 } else 616 } else
607 ctx->ht.is_40mhz = false; 617 ctx->ht.is_40mhz = false;
608 618
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 901fd9485d75..63d948d21c04 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -135,8 +135,8 @@ static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
135 u16 size = (u16)sizeof(struct iwl_addsta_cmd); 135 u16 size = (u16)sizeof(struct iwl_addsta_cmd);
136 struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data; 136 struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
137 memcpy(addsta, cmd, size); 137 memcpy(addsta, cmd, size);
138 /* resrved in 5000 */ 138 /* resrved in agn */
139 addsta->rate_n_flags = cpu_to_le16(0); 139 addsta->legacy_reserved = cpu_to_le16(0);
140 return size; 140 return size;
141} 141}
142 142
@@ -1250,9 +1250,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1250 1250
1251 switch (keyconf->cipher) { 1251 switch (keyconf->cipher) {
1252 case WLAN_CIPHER_SUITE_TKIP: 1252 case WLAN_CIPHER_SUITE_TKIP:
1253 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1254 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1255
1256 if (sta) 1253 if (sta)
1257 addr = sta->addr; 1254 addr = sta->addr;
1258 else /* station mode case only */ 1255 else /* station mode case only */
@@ -1265,8 +1262,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1265 seq.tkip.iv32, p1k, CMD_SYNC); 1262 seq.tkip.iv32, p1k, CMD_SYNC);
1266 break; 1263 break;
1267 case WLAN_CIPHER_SUITE_CCMP: 1264 case WLAN_CIPHER_SUITE_CCMP:
1268 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1269 /* fall through */
1270 case WLAN_CIPHER_SUITE_WEP40: 1265 case WLAN_CIPHER_SUITE_WEP40:
1271 case WLAN_CIPHER_SUITE_WEP104: 1266 case WLAN_CIPHER_SUITE_WEP104:
1272 ret = iwlagn_send_sta_key(priv, keyconf, sta_id, 1267 ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index e6a02e09ee18..81754cddba73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
91 tx_cmd->tid_tspec = qc[0] & 0xf; 91 tx_cmd->tid_tspec = qc[0] & 0xf;
92 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 92 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
93 } else { 93 } else {
94 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 94 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
95 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
96 else
97 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
95 } 98 }
96 99
97 iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags); 100 iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
@@ -148,7 +151,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
148 if (ieee80211_is_data(fc)) { 151 if (ieee80211_is_data(fc)) {
149 tx_cmd->initial_rate_index = 0; 152 tx_cmd->initial_rate_index = 0;
150 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; 153 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
151#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL 154#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
152 if (priv->tm_fixed_rate) { 155 if (priv->tm_fixed_rate) {
153 /* 156 /*
154 * rate overwrite by testmode 157 * rate overwrite by testmode
@@ -161,7 +164,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
161 } 164 }
162#endif 165#endif
163 return; 166 return;
164 } 167 } else if (ieee80211_is_back_req(fc))
168 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
165 169
166 /** 170 /**
167 * If the current TX rate stored in mac80211 has the MCS bit set, it's 171 * If the current TX rate stored in mac80211 has the MCS bit set, it's
@@ -790,6 +794,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
790 iwl_rx_reply_tx_agg(priv, tx_resp); 794 iwl_rx_reply_tx_agg(priv, tx_resp);
791 795
792 if (tx_resp->frame_count == 1) { 796 if (tx_resp->frame_count == 1) {
797 IWL_DEBUG_TX_REPLY(priv, "Q %d, ssn %d", txq_id, ssn);
793 __skb_queue_head_init(&skbs); 798 __skb_queue_head_init(&skbs);
794 /*we can free until ssn % q.n_bd not inclusive */ 799 /*we can free until ssn % q.n_bd not inclusive */
795 iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, 800 iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
@@ -920,11 +925,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
920 ba_resp->sta_id); 925 ba_resp->sta_id);
921 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, " 926 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
922 "scd_flow = %d, scd_ssn = %d\n", 927 "scd_flow = %d, scd_ssn = %d\n",
923 ba_resp->tid, 928 ba_resp->tid, ba_resp->seq_ctl,
924 ba_resp->seq_ctl,
925 (unsigned long long)le64_to_cpu(ba_resp->bitmap), 929 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
926 ba_resp->scd_flow, 930 scd_flow, ba_resp_scd_ssn);
927 ba_resp->scd_ssn);
928 931
929 /* Mark that the expected block-ack response arrived */ 932 /* Mark that the expected block-ack response arrived */
930 agg->wait_for_ba = false; 933 agg->wait_for_ba = false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index e235e84de8b4..f5fe42dbb3b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -366,7 +366,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
366 u32 num_wraps; /* # times uCode wrapped to top of log */ 366 u32 num_wraps; /* # times uCode wrapped to top of log */
367 u32 next_entry; /* index of next entry to be written by uCode */ 367 u32 next_entry; /* index of next entry to be written by uCode */
368 368
369 base = priv->device_pointers.error_event_table; 369 base = priv->shrd->device_pointers.error_event_table;
370 if (iwlagn_hw_valid_rtc_data_addr(base)) { 370 if (iwlagn_hw_valid_rtc_data_addr(base)) {
371 capacity = iwl_read_targ_mem(bus(priv), base); 371 capacity = iwl_read_targ_mem(bus(priv), base);
372 num_wraps = iwl_read_targ_mem(bus(priv), 372 num_wraps = iwl_read_targ_mem(bus(priv),
@@ -1036,6 +1036,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1036 priv->inst_evtlog_size = 1036 priv->inst_evtlog_size =
1037 priv->cfg->base_params->max_event_log_size; 1037 priv->cfg->base_params->max_event_log_size;
1038 priv->inst_errlog_ptr = pieces.inst_errlog_ptr; 1038 priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
1039#ifndef CONFIG_IWLWIFI_P2P
1040 ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
1041#endif
1039 1042
1040 priv->new_scan_threshold_behaviour = 1043 priv->new_scan_threshold_behaviour =
1041 !!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN); 1044 !!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
@@ -1057,7 +1060,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1057 priv->sta_key_max_num = STA_KEY_MAX_NUM; 1060 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1058 priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; 1061 priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1059 } 1062 }
1060
1061 /* 1063 /*
1062 * figure out the offset of chain noise reset and gain commands 1064 * figure out the offset of chain noise reset and gain commands
1063 * base on the size of standard phy calibration commands table size 1065 * base on the size of standard phy calibration commands table size
@@ -1232,14 +1234,14 @@ int iwl_alive_start(struct iwl_priv *priv)
1232 priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS; 1234 priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
1233 priv->cur_rssi_ctx = NULL; 1235 priv->cur_rssi_ctx = NULL;
1234 1236
1235 iwlagn_send_prio_tbl(priv); 1237 iwl_send_prio_tbl(trans(priv));
1236 1238
1237 /* FIXME: w/a to force change uCode BT state machine */ 1239 /* FIXME: w/a to force change uCode BT state machine */
1238 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, 1240 ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
1239 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); 1241 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
1240 if (ret) 1242 if (ret)
1241 return ret; 1243 return ret;
1242 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE, 1244 ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE,
1243 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); 1245 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
1244 if (ret) 1246 if (ret)
1245 return ret; 1247 return ret;
@@ -1575,6 +1577,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
1575 1577
1576 mutex_init(&priv->shrd->mutex); 1578 mutex_init(&priv->shrd->mutex);
1577 1579
1580 INIT_LIST_HEAD(&trans(priv)->calib_results);
1581
1578 priv->ieee_channels = NULL; 1582 priv->ieee_channels = NULL;
1579 priv->ieee_rates = NULL; 1583 priv->ieee_rates = NULL;
1580 priv->band = IEEE80211_BAND_2GHZ; 1584 priv->band = IEEE80211_BAND_2GHZ;
@@ -1631,7 +1635,6 @@ err:
1631 1635
1632static void iwl_uninit_drv(struct iwl_priv *priv) 1636static void iwl_uninit_drv(struct iwl_priv *priv)
1633{ 1637{
1634 iwl_calib_free_results(priv);
1635 iwl_free_geos(priv); 1638 iwl_free_geos(priv);
1636 iwl_free_channel_map(priv); 1639 iwl_free_channel_map(priv);
1637 if (priv->tx_cmd_pool) 1640 if (priv->tx_cmd_pool)
@@ -1680,6 +1683,41 @@ static int iwl_set_hw_params(struct iwl_priv *priv)
1680 1683
1681 1684
1682 1685
1686static void iwl_debug_config(struct iwl_priv *priv)
1687{
1688 dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
1689#ifdef CONFIG_IWLWIFI_DEBUG
1690 "enabled\n");
1691#else
1692 "disabled\n");
1693#endif
1694 dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
1695#ifdef CONFIG_IWLWIFI_DEBUGFS
1696 "enabled\n");
1697#else
1698 "disabled\n");
1699#endif
1700 dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
1701#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
1702 "enabled\n");
1703#else
1704 "disabled\n");
1705#endif
1706
1707 dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
1708#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
1709 "enabled\n");
1710#else
1711 "disabled\n");
1712#endif
1713 dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_P2P "
1714#ifdef CONFIG_IWLWIFI_P2P
1715 "enabled\n");
1716#else
1717 "disabled\n");
1718#endif
1719}
1720
1683int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops, 1721int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
1684 struct iwl_cfg *cfg) 1722 struct iwl_cfg *cfg)
1685{ 1723{
@@ -1715,6 +1753,9 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
1715 1753
1716 SET_IEEE80211_DEV(hw, bus(priv)->dev); 1754 SET_IEEE80211_DEV(hw, bus(priv)->dev);
1717 1755
1756 /* what debugging capabilities we have */
1757 iwl_debug_config(priv);
1758
1718 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); 1759 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
1719 priv->cfg = cfg; 1760 priv->cfg = cfg;
1720 1761
@@ -1780,11 +1821,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
1780 goto out_free_eeprom; 1821 goto out_free_eeprom;
1781 1822
1782 /* extract MAC Address */ 1823 /* extract MAC Address */
1783 iwl_eeprom_get_mac(priv, priv->addresses[0].addr); 1824 iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr);
1784 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); 1825 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
1785 priv->hw->wiphy->addresses = priv->addresses; 1826 priv->hw->wiphy->addresses = priv->addresses;
1786 priv->hw->wiphy->n_addresses = 1; 1827 priv->hw->wiphy->n_addresses = 1;
1787 num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS); 1828 num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS);
1788 if (num_mac > 1) { 1829 if (num_mac > 1) {
1789 memcpy(priv->addresses[1].addr, priv->addresses[0].addr, 1830 memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
1790 ETH_ALEN); 1831 ETH_ALEN);
@@ -1849,7 +1890,7 @@ out_destroy_workqueue:
1849 priv->shrd->workqueue = NULL; 1890 priv->shrd->workqueue = NULL;
1850 iwl_uninit_drv(priv); 1891 iwl_uninit_drv(priv);
1851out_free_eeprom: 1892out_free_eeprom:
1852 iwl_eeprom_free(priv); 1893 iwl_eeprom_free(priv->shrd);
1853out_free_trans: 1894out_free_trans:
1854 iwl_trans_free(trans(priv)); 1895 iwl_trans_free(trans(priv));
1855out_free_traffic_mem: 1896out_free_traffic_mem:
@@ -1888,7 +1929,7 @@ void __devexit iwl_remove(struct iwl_priv * priv)
1888 1929
1889 iwl_dealloc_ucode(trans(priv)); 1930 iwl_dealloc_ucode(trans(priv));
1890 1931
1891 iwl_eeprom_free(priv); 1932 iwl_eeprom_free(priv->shrd);
1892 1933
1893 /*netif_stop_queue(dev); */ 1934 /*netif_stop_queue(dev); */
1894 flush_workqueue(priv->shrd->workqueue); 1935 flush_workqueue(priv->shrd->workqueue);
@@ -1988,9 +2029,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
1988module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO); 2029module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
1989MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])"); 2030MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
1990 2031
1991module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO); 2032module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
1992MODULE_PARM_DESC(wd_disable, 2033MODULE_PARM_DESC(wd_disable,
1993 "Disable stuck queue watchdog timer (default: 0 [enabled])"); 2034 "Disable stuck queue watchdog timer 0=system default, "
2035 "1=disable, 2=enable (default: 0)");
1994 2036
1995/* 2037/*
1996 * set bt_coex_active to true, uCode will do kill/defer 2038 * set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 5d8d2f445923..eb453ea41c41 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -101,13 +101,15 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
101 struct ieee80211_vif *vif, 101 struct ieee80211_vif *vif,
102 struct ieee80211_bss_conf *bss_conf, 102 struct ieee80211_bss_conf *bss_conf,
103 u32 changes); 103 u32 changes);
104void iwlagn_config_ht40(struct ieee80211_conf *conf,
105 struct iwl_rxon_context *ctx);
104 106
105/* uCode */ 107/* uCode */
106int iwlagn_rx_calib_result(struct iwl_priv *priv, 108int iwlagn_rx_calib_result(struct iwl_priv *priv,
107 struct iwl_rx_mem_buffer *rxb, 109 struct iwl_rx_mem_buffer *rxb,
108 struct iwl_device_cmd *cmd); 110 struct iwl_device_cmd *cmd);
109int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type); 111int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type);
110void iwlagn_send_prio_tbl(struct iwl_priv *priv); 112void iwl_send_prio_tbl(struct iwl_trans *trans);
111int iwlagn_run_init_ucode(struct iwl_priv *priv); 113int iwlagn_run_init_ucode(struct iwl_priv *priv);
112int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv, 114int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
113 enum iwl_ucode_type ucode_type); 115 enum iwl_ucode_type ucode_type);
@@ -115,7 +117,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
115/* lib */ 117/* lib */
116int iwlagn_send_tx_power(struct iwl_priv *priv); 118int iwlagn_send_tx_power(struct iwl_priv *priv);
117void iwlagn_temperature(struct iwl_priv *priv); 119void iwlagn_temperature(struct iwl_priv *priv);
118u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv); 120u16 iwl_eeprom_calib_version(struct iwl_shared *shrd);
119int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 121int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
120void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 122void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
121int iwlagn_send_beacon_cmd(struct iwl_priv *priv); 123int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
@@ -352,28 +354,12 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
352 354
353/* eeprom */ 355/* eeprom */
354void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv); 356void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
355void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); 357void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
356 358
357/* notification wait support */
358void __acquires(wait_entry)
359iwlagn_init_notification_wait(struct iwl_priv *priv,
360 struct iwl_notification_wait *wait_entry,
361 u8 cmd,
362 void (*fn)(struct iwl_priv *priv,
363 struct iwl_rx_packet *pkt,
364 void *data),
365 void *fn_data);
366int __must_check __releases(wait_entry)
367iwlagn_wait_notification(struct iwl_priv *priv,
368 struct iwl_notification_wait *wait_entry,
369 unsigned long timeout);
370void __releases(wait_entry)
371iwlagn_remove_notification(struct iwl_priv *priv,
372 struct iwl_notification_wait *wait_entry);
373extern int iwlagn_init_alive_start(struct iwl_priv *priv); 359extern int iwlagn_init_alive_start(struct iwl_priv *priv);
374extern int iwl_alive_start(struct iwl_priv *priv); 360extern int iwl_alive_start(struct iwl_priv *priv);
375/* svtool */ 361/* svtool */
376#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL 362#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
377extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, 363extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
378 int len); 364 int len);
379extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, 365extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index f4eccf583775..265de39d394c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -109,10 +109,10 @@ enum {
109 /* RX, TX, LEDs */ 109 /* RX, TX, LEDs */
110 REPLY_TX = 0x1c, 110 REPLY_TX = 0x1c,
111 REPLY_LEDS_CMD = 0x48, 111 REPLY_LEDS_CMD = 0x48,
112 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ 112 REPLY_TX_LINK_QUALITY_CMD = 0x4e,
113 113
114 /* WiMAX coexistence */ 114 /* WiMAX coexistence */
115 COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */ 115 COEX_PRIORITY_TABLE_CMD = 0x5a,
116 COEX_MEDIUM_NOTIFICATION = 0x5b, 116 COEX_MEDIUM_NOTIFICATION = 0x5b,
117 COEX_EVENT_CMD = 0x5c, 117 COEX_EVENT_CMD = 0x5c,
118 118
@@ -466,23 +466,27 @@ struct iwl_error_event_table {
466 u32 frame_ptr; /* frame pointer */ 466 u32 frame_ptr; /* frame pointer */
467 u32 stack_ptr; /* stack pointer */ 467 u32 stack_ptr; /* stack pointer */
468 u32 hcmd; /* last host command header */ 468 u32 hcmd; /* last host command header */
469#if 0 469 u32 isr0; /* isr status register LMPM_NIC_ISR0:
470 /* no need to read the remainder, we don't use the values */ 470 * rxtx_flag */
471 u32 isr0; /* isr status register LMPM_NIC_ISR0: rxtx_flag */ 471 u32 isr1; /* isr status register LMPM_NIC_ISR1:
472 u32 isr1; /* isr status register LMPM_NIC_ISR1: host_flag */ 472 * host_flag */
473 u32 isr2; /* isr status register LMPM_NIC_ISR2: enc_flag */ 473 u32 isr2; /* isr status register LMPM_NIC_ISR2:
474 u32 isr3; /* isr status register LMPM_NIC_ISR3: time_flag */ 474 * enc_flag */
475 u32 isr4; /* isr status register LMPM_NIC_ISR4: wico interrupt */ 475 u32 isr3; /* isr status register LMPM_NIC_ISR3:
476 * time_flag */
477 u32 isr4; /* isr status register LMPM_NIC_ISR4:
478 * wico interrupt */
476 u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ 479 u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
477 u32 wait_event; /* wait event() caller address */ 480 u32 wait_event; /* wait event() caller address */
478 u32 l2p_control; /* L2pControlField */ 481 u32 l2p_control; /* L2pControlField */
479 u32 l2p_duration; /* L2pDurationField */ 482 u32 l2p_duration; /* L2pDurationField */
480 u32 l2p_mhvalid; /* L2pMhValidBits */ 483 u32 l2p_mhvalid; /* L2pMhValidBits */
481 u32 l2p_addr_match; /* L2pAddrMatchStat */ 484 u32 l2p_addr_match; /* L2pAddrMatchStat */
482 u32 lmpm_pmg_sel; /* indicate which clocks are turned on (LMPM_PMG_SEL) */ 485 u32 lmpm_pmg_sel; /* indicate which clocks are turned on
483 u32 u_timestamp; /* indicate when the date and time of the compilation */ 486 * (LMPM_PMG_SEL) */
487 u32 u_timestamp; /* indicate when the date and time of the
488 * compilation */
484 u32 flow_handler; /* FH read/write pointers, RX credit */ 489 u32 flow_handler; /* FH read/write pointers, RX credit */
485#endif
486} __packed; 490} __packed;
487 491
488struct iwl_alive_resp { 492struct iwl_alive_resp {
@@ -810,7 +814,7 @@ struct iwl_qosparam_cmd {
810#define IWLAGN_STATION_COUNT 16 814#define IWLAGN_STATION_COUNT 16
811 815
812#define IWL_INVALID_STATION 255 816#define IWL_INVALID_STATION 255
813#define IWL_MAX_TID_COUNT 9 817#define IWL_MAX_TID_COUNT 8
814 818
815#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) 819#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
816#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) 820#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -931,8 +935,7 @@ struct iwl_addsta_cmd {
931 * corresponding to bit (e.g. bit 5 controls TID 5). 935 * corresponding to bit (e.g. bit 5 controls TID 5).
932 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ 936 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
933 __le16 tid_disable_tx; 937 __le16 tid_disable_tx;
934 938 __le16 legacy_reserved;
935 __le16 rate_n_flags; /* 3945 only */
936 939
937 /* TID for which to add block-ack support. 940 /* TID for which to add block-ack support.
938 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ 941 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1162,8 +1165,7 @@ struct iwl_rx_mpdu_res_start {
1162 * 1165 *
1163 * uCode handles retrying Tx when an ACK is expected but not received. 1166 * uCode handles retrying Tx when an ACK is expected but not received.
1164 * This includes trying lower data rates than the one requested in the Tx 1167 * This includes trying lower data rates than the one requested in the Tx
1165 * command, as set up by the REPLY_RATE_SCALE (for 3945) or 1168 * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn).
1166 * REPLY_TX_LINK_QUALITY_CMD (agn).
1167 * 1169 *
1168 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. 1170 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
1169 * This command must be executed after every RXON command, before Tx can occur. 1171 * This command must be executed after every RXON command, before Tx can occur.
@@ -1175,25 +1177,9 @@ struct iwl_rx_mpdu_res_start {
1175 * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it 1177 * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
1176 * before this frame. if CTS-to-self required check 1178 * before this frame. if CTS-to-self required check
1177 * RXON_FLG_SELF_CTS_EN status. 1179 * RXON_FLG_SELF_CTS_EN status.
1178 * unused in 3945/4965, used in 5000 series and after
1179 */ 1180 */
1180#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0) 1181#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
1181 1182
1182/*
1183 * 1: Use Request-To-Send protocol before this frame.
1184 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
1185 * used in 3945/4965, unused in 5000 series and after
1186 */
1187#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
1188
1189/*
1190 * 1: Transmit Clear-To-Send to self before this frame.
1191 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
1192 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
1193 * used in 3945/4965, unused in 5000 series and after
1194 */
1195#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
1196
1197/* 1: Expect ACK from receiving station 1183/* 1: Expect ACK from receiving station
1198 * 0: Don't expect ACK (MAC header's duration field s/b 0) 1184 * 0: Don't expect ACK (MAC header's duration field s/b 0)
1199 * Set this for unicast frames, but not broadcast/multicast. */ 1185 * Set this for unicast frames, but not broadcast/multicast. */
@@ -1211,18 +1197,8 @@ struct iwl_rx_mpdu_res_start {
1211 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */ 1197 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
1212#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6) 1198#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
1213 1199
1214/* 1200/* Tx antenna selection field; reserved (0) for agn devices. */
1215 * 1: Frame requires full Tx-Op protection.
1216 * Set this if either RTS or CTS Tx Flag gets set.
1217 * used in 3945/4965, unused in 5000 series and after
1218 */
1219#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
1220
1221/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices.
1222 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
1223#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00) 1201#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
1224#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
1225#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
1226 1202
1227/* 1: Ignore Bluetooth priority for this frame. 1203/* 1: Ignore Bluetooth priority for this frame.
1228 * 0: Delay Tx until Bluetooth device is done (normal usage). */ 1204 * 0: Delay Tx until Bluetooth device is done (normal usage). */
@@ -1568,7 +1544,6 @@ struct iwl_compressed_ba_resp {
1568 __le64 bitmap; 1544 __le64 bitmap;
1569 __le16 scd_flow; 1545 __le16 scd_flow;
1570 __le16 scd_ssn; 1546 __le16 scd_ssn;
1571 /* following only for 5000 series and up */
1572 u8 txed; /* number of frames sent */ 1547 u8 txed; /* number of frames sent */
1573 u8 txed_2_done; /* number of frames acked */ 1548 u8 txed_2_done; /* number of frames acked */
1574} __packed; 1549} __packed;
@@ -1670,7 +1645,7 @@ struct iwl_link_qual_agg_params {
1670/* 1645/*
1671 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) 1646 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1672 * 1647 *
1673 * For agn devices only; 3945 uses REPLY_RATE_SCALE. 1648 * For agn devices
1674 * 1649 *
1675 * Each station in the agn device's internal station table has its own table 1650 * Each station in the agn device's internal station table has its own table
1676 * of 16 1651 * of 16
@@ -1919,7 +1894,7 @@ struct iwl_link_quality_cmd {
1919/* 1894/*
1920 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) 1895 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
1921 * 1896 *
1922 * 3945 and agn devices support hardware handshake with Bluetooth device on 1897 * agn devices support hardware handshake with Bluetooth device on
1923 * same platform. Bluetooth device alerts wireless device when it will Tx; 1898 * same platform. Bluetooth device alerts wireless device when it will Tx;
1924 * wireless device can delay or kill its own Tx to accommodate. 1899 * wireless device can delay or kill its own Tx to accommodate.
1925 */ 1900 */
@@ -2203,8 +2178,8 @@ struct iwl_spectrum_notification {
2203 2178
2204struct iwl_powertable_cmd { 2179struct iwl_powertable_cmd {
2205 __le16 flags; 2180 __le16 flags;
2206 u8 keep_alive_seconds; /* 3945 reserved */ 2181 u8 keep_alive_seconds;
2207 u8 debug_flags; /* 3945 reserved */ 2182 u8 debug_flags;
2208 __le32 rx_data_timeout; 2183 __le32 rx_data_timeout;
2209 __le32 tx_data_timeout; 2184 __le32 tx_data_timeout;
2210 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2185 __le32 sleep_interval[IWL_POWER_VEC_SIZE];
@@ -2325,9 +2300,9 @@ struct iwl_scan_channel {
2325/** 2300/**
2326 * struct iwl_ssid_ie - directed scan network information element 2301 * struct iwl_ssid_ie - directed scan network information element
2327 * 2302 *
2328 * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in 2303 * Up to 20 of these may appear in REPLY_SCAN_CMD,
2329 * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel; 2304 * selected by "type" bit field in struct iwl_scan_channel;
2330 * each channel may select different ssids from among the 20 (4) entries. 2305 * each channel may select different ssids from among the 20 entries.
2331 * SSID IEs get transmitted in reverse order of entry. 2306 * SSID IEs get transmitted in reverse order of entry.
2332 */ 2307 */
2333struct iwl_ssid_ie { 2308struct iwl_ssid_ie {
@@ -2336,7 +2311,6 @@ struct iwl_ssid_ie {
2336 u8 ssid[32]; 2311 u8 ssid[32];
2337} __packed; 2312} __packed;
2338 2313
2339#define PROBE_OPTION_MAX_3945 4
2340#define PROBE_OPTION_MAX 20 2314#define PROBE_OPTION_MAX 20
2341#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) 2315#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2342#define IWL_GOOD_CRC_TH_DISABLED 0 2316#define IWL_GOOD_CRC_TH_DISABLED 0
@@ -2417,8 +2391,6 @@ struct iwl_scan_cmd {
2417 * channel */ 2391 * channel */
2418 __le32 suspend_time; /* pause scan this long (in "extended beacon 2392 __le32 suspend_time; /* pause scan this long (in "extended beacon
2419 * format") when returning to service chnl: 2393 * format") when returning to service chnl:
2420 * 3945; 31:24 # beacons, 19:0 additional usec,
2421 * 4965; 31:22 # beacons, 21:0 additional usec.
2422 */ 2394 */
2423 __le32 flags; /* RXON_FLG_* */ 2395 __le32 flags; /* RXON_FLG_* */
2424 __le32 filter_flags; /* RXON_FILTER_* */ 2396 __le32 filter_flags; /* RXON_FILTER_* */
@@ -2734,7 +2706,7 @@ struct statistics_div {
2734 2706
2735struct statistics_general_common { 2707struct statistics_general_common {
2736 __le32 temperature; /* radio temperature */ 2708 __le32 temperature; /* radio temperature */
2737 __le32 temperature_m; /* for 5000 and up, this is radio voltage */ 2709 __le32 temperature_m; /* radio voltage */
2738 struct statistics_dbg dbg; 2710 struct statistics_dbg dbg;
2739 __le32 sleep_time; 2711 __le32 sleep_time;
2740 __le32 slots_out; 2712 __le32 slots_out;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index f9e9170e977a..3b6f48bfe0e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -836,19 +836,6 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
836} 836}
837#endif 837#endif
838 838
839static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
840{
841 unsigned long flags;
842 struct iwl_notification_wait *wait_entry;
843
844 spin_lock_irqsave(&priv->notif_wait_lock, flags);
845 list_for_each_entry(wait_entry, &priv->notif_waits, list)
846 wait_entry->aborted = true;
847 spin_unlock_irqrestore(&priv->notif_wait_lock, flags);
848
849 wake_up_all(&priv->notif_waitq);
850}
851
852void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) 839void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
853{ 840{
854 unsigned int reload_msec; 841 unsigned int reload_msec;
@@ -860,7 +847,7 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
860 /* Cancel currently queued command. */ 847 /* Cancel currently queued command. */
861 clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status); 848 clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
862 849
863 iwlagn_abort_notification_waits(priv); 850 iwl_abort_notification_waits(priv->shrd);
864 851
865 /* Keep the restart process from trying to send host 852 /* Keep the restart process from trying to send host
866 * commands by clearing the ready bit */ 853 * commands by clearing the ready bit */
@@ -1505,11 +1492,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
1505{ 1492{
1506 unsigned int timeout = priv->cfg->base_params->wd_timeout; 1493 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1507 1494
1508 if (timeout && !iwlagn_mod_params.wd_disable) 1495 if (!iwlagn_mod_params.wd_disable) {
1509 mod_timer(&priv->watchdog, 1496 /* use system default */
1510 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); 1497 if (timeout && !priv->cfg->base_params->wd_disable)
1511 else 1498 mod_timer(&priv->watchdog,
1512 del_timer(&priv->watchdog); 1499 jiffies +
1500 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1501 else
1502 del_timer(&priv->watchdog);
1503 } else {
1504 /* module parameter overwrite default configuration */
1505 if (timeout && iwlagn_mod_params.wd_disable == 2)
1506 mod_timer(&priv->watchdog,
1507 jiffies +
1508 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1509 else
1510 del_timer(&priv->watchdog);
1511 }
1513} 1512}
1514 1513
1515/** 1514/**
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index fa47f75185df..6da53a36c1be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
113 * @shadow_reg_enable: HW shadhow register bit 113 * @shadow_reg_enable: HW shadhow register bit
114 * @no_idle_support: do not support idle mode 114 * @no_idle_support: do not support idle mode
115 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up 115 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
116 * wd_disable: disable watchdog timer
116 */ 117 */
117struct iwl_base_params { 118struct iwl_base_params {
118 int eeprom_size; 119 int eeprom_size;
@@ -134,6 +135,7 @@ struct iwl_base_params {
134 const bool shadow_reg_enable; 135 const bool shadow_reg_enable;
135 const bool no_idle_support; 136 const bool no_idle_support;
136 const bool hd_v2; 137 const bool hd_v2;
138 const bool wd_disable;
137}; 139};
138/* 140/*
139 * @advanced_bt_coexist: support advanced bt coexist 141 * @advanced_bt_coexist: support advanced bt coexist
@@ -184,8 +186,9 @@ struct iwl_ht_params {
184 * @ht_params: point to ht patameters 186 * @ht_params: point to ht patameters
185 * @bt_params: pointer to bt parameters 187 * @bt_params: pointer to bt parameters
186 * @pa_type: used by 6000 series only to identify the type of Power Amplifier 188 * @pa_type: used by 6000 series only to identify the type of Power Amplifier
187 * @need_dc_calib: need to perform init dc calibration
188 * @need_temp_offset_calib: need to perform temperature offset calibration 189 * @need_temp_offset_calib: need to perform temperature offset calibration
190 * @no_xtal_calib: some devices do not need crystal calibration data,
191 * don't send it to those
189 * @scan_antennas: available antenna for scan operation 192 * @scan_antennas: available antenna for scan operation
190 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) 193 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
191 * @adv_pm: advance power management 194 * @adv_pm: advance power management
@@ -222,8 +225,8 @@ struct iwl_cfg {
222 struct iwl_ht_params *ht_params; 225 struct iwl_ht_params *ht_params;
223 struct iwl_bt_params *bt_params; 226 struct iwl_bt_params *bt_params;
224 enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */ 227 enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
225 const bool need_dc_calib; /* if used set to true */
226 const bool need_temp_offset_calib; /* if used set to true */ 228 const bool need_temp_offset_calib; /* if used set to true */
229 const bool no_xtal_calib;
227 u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; 230 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
228 enum iwl_led_mode led_mode; 231 enum iwl_led_mode led_mode;
229 const bool adv_pm; 232 const bool adv_pm;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 40ef97bac1aa..f8fc2393dd4c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,20 +47,21 @@ do { \
47} while (0) 47} while (0)
48 48
49#ifdef CONFIG_IWLWIFI_DEBUG 49#ifdef CONFIG_IWLWIFI_DEBUG
50#define IWL_DEBUG(m, level, fmt, args...) \ 50#define IWL_DEBUG(m, level, fmt, ...) \
51do { \ 51do { \
52 if (iwl_get_debug_level((m)->shrd) & (level)) \ 52 if (iwl_get_debug_level((m)->shrd) & (level)) \
53 dev_printk(KERN_ERR, bus(m)->dev, \ 53 dev_err(bus(m)->dev, "%c %s " fmt, \
54 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ 54 in_interrupt() ? 'I' : 'U', __func__, \
55 __func__ , ## args); \ 55 ##__VA_ARGS__); \
56} while (0) 56} while (0)
57 57
58#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \ 58#define IWL_DEBUG_LIMIT(m, level, fmt, ...) \
59do { \ 59do { \
60 if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\ 60 if (iwl_get_debug_level((m)->shrd) & (level) && \
61 dev_printk(KERN_ERR, bus(m)->dev, \ 61 net_ratelimit()) \
62 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ 62 dev_err(bus(m)->dev, "%c %s " fmt, \
63 __func__ , ## args); \ 63 in_interrupt() ? 'I' : 'U', __func__, \
64 ##__VA_ARGS__); \
64} while (0) 65} while (0)
65 66
66#define iwl_print_hex_dump(m, level, p, len) \ 67#define iwl_print_hex_dump(m, level, p, len) \
@@ -70,14 +71,18 @@ do { \
70 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ 71 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
71} while (0) 72} while (0)
72 73
73#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \ 74#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...) \
74do { \ 75do { \
75 if (!iwl_is_rfkill(p->shrd)) \ 76 if (!iwl_is_rfkill(p->shrd)) \
76 dev_printk(KERN_ERR, bus(p)->dev, "%c %s " fmt, \ 77 dev_err(bus(p)->dev, "%s%c %s " fmt, \
77 (in_interrupt() ? 'I' : 'U'), __func__ , ##args); \ 78 "", \
78 else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \ 79 in_interrupt() ? 'I' : 'U', __func__, \
79 dev_printk(KERN_ERR, bus(p)->dev, "(RFKILL) %c %s " fmt, \ 80 ##__VA_ARGS__); \
80 (in_interrupt() ? 'I' : 'U'), __func__ , ##args); \ 81 else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \
82 dev_err(bus(p)->dev, "%s%c %s " fmt, \
83 "(RFKILL) ", \
84 in_interrupt() ? 'I' : 'U', __func__, \
85 ##__VA_ARGS__); \
81} while (0) 86} while (0)
82 87
83#else 88#else
@@ -129,48 +134,43 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
129 */ 134 */
130 135
131/* 0x0000000F - 0x00000001 */ 136/* 0x0000000F - 0x00000001 */
132#define IWL_DL_INFO (1 << 0) 137#define IWL_DL_INFO 0x00000001
133#define IWL_DL_MAC80211 (1 << 1) 138#define IWL_DL_MAC80211 0x00000002
134#define IWL_DL_HCMD (1 << 2) 139#define IWL_DL_HCMD 0x00000004
135#define IWL_DL_STATE (1 << 3) 140#define IWL_DL_STATE 0x00000008
136/* 0x000000F0 - 0x00000010 */ 141/* 0x000000F0 - 0x00000010 */
137#define IWL_DL_MACDUMP (1 << 4) 142#define IWL_DL_EEPROM 0x00000040
138#define IWL_DL_HCMD_DUMP (1 << 5) 143#define IWL_DL_RADIO 0x00000080
139#define IWL_DL_EEPROM (1 << 6)
140#define IWL_DL_RADIO (1 << 7)
141/* 0x00000F00 - 0x00000100 */ 144/* 0x00000F00 - 0x00000100 */
142#define IWL_DL_POWER (1 << 8) 145#define IWL_DL_POWER 0x00000100
143#define IWL_DL_TEMP (1 << 9) 146#define IWL_DL_TEMP 0x00000200
144/* reserved (1 << 10) */ 147#define IWL_DL_SCAN 0x00000800
145#define IWL_DL_SCAN (1 << 11)
146/* 0x0000F000 - 0x00001000 */ 148/* 0x0000F000 - 0x00001000 */
147#define IWL_DL_ASSOC (1 << 12) 149#define IWL_DL_ASSOC 0x00001000
148#define IWL_DL_DROP (1 << 13) 150#define IWL_DL_DROP 0x00002000
149/* reserved (1 << 14) */ 151#define IWL_DL_COEX 0x00008000
150#define IWL_DL_COEX (1 << 15)
151/* 0x000F0000 - 0x00010000 */ 152/* 0x000F0000 - 0x00010000 */
152#define IWL_DL_FW (1 << 16) 153#define IWL_DL_FW 0x00010000
153#define IWL_DL_RF_KILL (1 << 17) 154#define IWL_DL_RF_KILL 0x00020000
154#define IWL_DL_FW_ERRORS (1 << 18) 155#define IWL_DL_FW_ERRORS 0x00040000
155#define IWL_DL_LED (1 << 19) 156#define IWL_DL_LED 0x00080000
156/* 0x00F00000 - 0x00100000 */ 157/* 0x00F00000 - 0x00100000 */
157#define IWL_DL_RATE (1 << 20) 158#define IWL_DL_RATE 0x00100000
158#define IWL_DL_CALIB (1 << 21) 159#define IWL_DL_CALIB 0x00200000
159#define IWL_DL_WEP (1 << 22) 160#define IWL_DL_WEP 0x00400000
160#define IWL_DL_TX (1 << 23) 161#define IWL_DL_TX 0x00800000
161/* 0x0F000000 - 0x01000000 */ 162/* 0x0F000000 - 0x01000000 */
162#define IWL_DL_RX (1 << 24) 163#define IWL_DL_RX 0x01000000
163#define IWL_DL_ISR (1 << 25) 164#define IWL_DL_ISR 0x02000000
164#define IWL_DL_HT (1 << 26) 165#define IWL_DL_HT 0x04000000
165/* 0xF0000000 - 0x10000000 */ 166/* 0xF0000000 - 0x10000000 */
166#define IWL_DL_11H (1 << 28) 167#define IWL_DL_11H 0x10000000
167#define IWL_DL_STATS (1 << 29) 168#define IWL_DL_STATS 0x20000000
168#define IWL_DL_TX_REPLY (1 << 30) 169#define IWL_DL_TX_REPLY 0x40000000
169#define IWL_DL_TX_QUEUES (1 << 31) 170#define IWL_DL_TX_QUEUES 0x80000000
170 171
171#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) 172#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
172#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) 173#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
173#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
174#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) 174#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
175#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) 175#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
176#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) 176#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
@@ -179,7 +179,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
179#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) 179#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
180#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) 180#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
181#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) 181#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
182#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
183#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a) 182#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
184#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) 183#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
185#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) 184#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
@@ -201,8 +200,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
201#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ 200#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
202 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) 201 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
203#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) 202#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
204#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
205 IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
206#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a) 203#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
207#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) 204#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
208#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) 205#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 68b04f5b10ce..6bf6845e1a51 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -234,11 +234,12 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
234 234
235 /* default is to dump the entire data segment */ 235 /* default is to dump the entire data segment */
236 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { 236 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
237 struct iwl_trans *trans = trans(priv);
237 priv->dbgfs_sram_offset = 0x800000; 238 priv->dbgfs_sram_offset = 0x800000;
238 if (priv->ucode_type == IWL_UCODE_INIT) 239 if (trans->shrd->ucode_type == IWL_UCODE_INIT)
239 priv->dbgfs_sram_len = trans(priv)->ucode_init.data.len; 240 priv->dbgfs_sram_len = trans->ucode_init.data.len;
240 else 241 else
241 priv->dbgfs_sram_len = trans(priv)->ucode_rt.data.len; 242 priv->dbgfs_sram_len = trans->ucode_rt.data.len;
242 } 243 }
243 len = priv->dbgfs_sram_len; 244 len = priv->dbgfs_sram_len;
244 245
@@ -415,7 +416,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
415 return -ENODATA; 416 return -ENODATA;
416 } 417 }
417 418
418 ptr = priv->eeprom; 419 ptr = priv->shrd->eeprom;
419 if (!ptr) { 420 if (!ptr) {
420 IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); 421 IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
421 return -ENOMEM; 422 return -ENOMEM;
@@ -427,7 +428,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
427 IWL_ERR(priv, "Can not allocate Buffer\n"); 428 IWL_ERR(priv, "Can not allocate Buffer\n");
428 return -ENOMEM; 429 return -ENOMEM;
429 } 430 }
430 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); 431 eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
431 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, " 432 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
432 "version: 0x%x\n", 433 "version: 0x%x\n",
433 (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) 434 (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 556e4a2c19bc..69ecf6e2e658 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -60,11 +60,10 @@ struct iwl_tx_queue;
60 60
61/* Default noise level to report when noise measurement is not available. 61/* Default noise level to report when noise measurement is not available.
62 * This may be because we're: 62 * This may be because we're:
63 * 1) Not associated (4965, no beacon statistics being sent to driver) 63 * 1) Not associated no beacon statistics being sent to driver)
64 * 2) Scanning (noise measurement does not apply to associated channel) 64 * 2) Scanning (noise measurement does not apply to associated channel)
65 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
66 * Use default noise value of -127 ... this is below the range of measurable 65 * Use default noise value of -127 ... this is below the range of measurable
67 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. 66 * Rx dBm for all agn devices, so it can indicate "unmeasurable" to user.
68 * Also, -127 works better than 0 when averaging frames with/without 67 * Also, -127 works better than 0 when averaging frames with/without
69 * noise info (e.g. averaging might be done in app); measured dBm values are 68 * noise info (e.g. averaging might be done in app); measured dBm values are
70 * always negative ... using a negative value as the default keeps all 69 * always negative ... using a negative value as the default keeps all
@@ -441,29 +440,6 @@ enum iwlagn_chain_noise_state {
441 IWL_CHAIN_NOISE_DONE, 440 IWL_CHAIN_NOISE_DONE,
442}; 441};
443 442
444
445/*
446 * enum iwl_calib
447 * defines the order in which results of initial calibrations
448 * should be sent to the runtime uCode
449 */
450enum iwl_calib {
451 IWL_CALIB_XTAL,
452 IWL_CALIB_DC,
453 IWL_CALIB_LO,
454 IWL_CALIB_TX_IQ,
455 IWL_CALIB_TX_IQ_PERD,
456 IWL_CALIB_BASE_BAND,
457 IWL_CALIB_TEMP_OFFSET,
458 IWL_CALIB_MAX
459};
460
461/* Opaque calibration results */
462struct iwl_calib_result {
463 void *buf;
464 size_t buf_len;
465};
466
467/* Sensitivity calib data */ 443/* Sensitivity calib data */
468struct iwl_sensitivity_data { 444struct iwl_sensitivity_data {
469 u32 auto_corr_ofdm; 445 u32 auto_corr_ofdm;
@@ -703,35 +679,6 @@ struct iwl_force_reset {
703 */ 679 */
704#define IWLAGN_EXT_BEACON_TIME_POS 22 680#define IWLAGN_EXT_BEACON_TIME_POS 22
705 681
706/**
707 * struct iwl_notification_wait - notification wait entry
708 * @list: list head for global list
709 * @fn: function called with the notification
710 * @cmd: command ID
711 *
712 * This structure is not used directly, to wait for a
713 * notification declare it on the stack, and call
714 * iwlagn_init_notification_wait() with appropriate
715 * parameters. Then do whatever will cause the ucode
716 * to notify the driver, and to wait for that then
717 * call iwlagn_wait_notification().
718 *
719 * Each notification is one-shot. If at some point we
720 * need to support multi-shot notifications (which
721 * can't be allocated on the stack) we need to modify
722 * the code for them.
723 */
724struct iwl_notification_wait {
725 struct list_head list;
726
727 void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt,
728 void *data);
729 void *fn_data;
730
731 u8 cmd;
732 bool triggered, aborted;
733};
734
735struct iwl_rxon_context { 682struct iwl_rxon_context {
736 struct ieee80211_vif *vif; 683 struct ieee80211_vif *vif;
737 684
@@ -794,7 +741,7 @@ enum iwl_scan_type {
794 IWL_SCAN_ROC, 741 IWL_SCAN_ROC,
795}; 742};
796 743
797#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL 744#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
798struct iwl_testmode_trace { 745struct iwl_testmode_trace {
799 u32 buff_size; 746 u32 buff_size;
800 u32 total_size; 747 u32 total_size;
@@ -804,6 +751,12 @@ struct iwl_testmode_trace {
804 dma_addr_t dma_addr; 751 dma_addr_t dma_addr;
805 bool trace_enabled; 752 bool trace_enabled;
806}; 753};
754struct iwl_testmode_sram {
755 u32 buff_size;
756 u32 num_chunks;
757 u8 *buff_addr;
758 bool sram_readed;
759};
807#endif 760#endif
808 761
809struct iwl_wipan_noa_data { 762struct iwl_wipan_noa_data {
@@ -868,9 +821,6 @@ struct iwl_priv {
868 s32 temperature; /* Celsius */ 821 s32 temperature; /* Celsius */
869 s32 last_temperature; 822 s32 last_temperature;
870 823
871 /* init calibration results */
872 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
873
874 struct iwl_wipan_noa_data __rcu *noa_data; 824 struct iwl_wipan_noa_data __rcu *noa_data;
875 825
876 /* Scan related variables */ 826 /* Scan related variables */
@@ -897,18 +847,12 @@ struct iwl_priv {
897 u32 ucode_ver; /* version of ucode, copy of 847 u32 ucode_ver; /* version of ucode, copy of
898 iwl_ucode.ver */ 848 iwl_ucode.ver */
899 849
900 enum iwl_ucode_type ucode_type;
901 char firmware_name[25]; 850 char firmware_name[25];
902 851
903 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; 852 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
904 853
905 __le16 switch_channel; 854 __le16 switch_channel;
906 855
907 struct {
908 u32 error_event_table;
909 u32 log_event_table;
910 } device_pointers;
911
912 u16 active_rate; 856 u16 active_rate;
913 857
914 u8 start_calib; 858 u8 start_calib;
@@ -942,10 +886,6 @@ struct iwl_priv {
942 /* Indication if ieee80211_ops->open has been called */ 886 /* Indication if ieee80211_ops->open has been called */
943 u8 is_open; 887 u8 is_open;
944 888
945 /* eeprom -- this is in the card's little endian byte order */
946 u8 *eeprom;
947 struct iwl_eeprom_calib_info *calib_info;
948
949 enum nl80211_iftype iw_mode; 889 enum nl80211_iftype iw_mode;
950 890
951 /* Last Rx'd beacon timestamp */ 891 /* Last Rx'd beacon timestamp */
@@ -1001,10 +941,6 @@ struct iwl_priv {
1001 /* counts reply_tx error */ 941 /* counts reply_tx error */
1002 struct reply_tx_error_statistics reply_tx_stats; 942 struct reply_tx_error_statistics reply_tx_stats;
1003 struct reply_agg_tx_error_statistics reply_agg_tx_stats; 943 struct reply_agg_tx_error_statistics reply_agg_tx_stats;
1004 /* notification wait support */
1005 struct list_head notif_waits;
1006 spinlock_t notif_wait_lock;
1007 wait_queue_head_t notif_waitq;
1008 944
1009 /* remain-on-channel offload support */ 945 /* remain-on-channel offload support */
1010 struct ieee80211_channel *hw_roc_channel; 946 struct ieee80211_channel *hw_roc_channel;
@@ -1082,8 +1018,9 @@ struct iwl_priv {
1082 struct led_classdev led; 1018 struct led_classdev led;
1083 unsigned long blink_on, blink_off; 1019 unsigned long blink_on, blink_off;
1084 bool led_registered; 1020 bool led_registered;
1085#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL 1021#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
1086 struct iwl_testmode_trace testmode_trace; 1022 struct iwl_testmode_trace testmode_trace;
1023 struct iwl_testmode_sram testmode_sram;
1087 u32 tm_fixed_rate; 1024 u32 tm_fixed_rate;
1088#endif 1025#endif
1089 1026
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index dcada0827ea4..6fcc7d586b24 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -215,11 +215,11 @@ static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
215 return ret; 215 return ret;
216} 216}
217 217
218u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset) 218u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset)
219{ 219{
220 if (!priv->eeprom) 220 if (!shrd->eeprom)
221 return 0; 221 return 0;
222 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); 222 return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8);
223} 223}
224 224
225int iwl_eeprom_check_version(struct iwl_priv *priv) 225int iwl_eeprom_check_version(struct iwl_priv *priv)
@@ -227,8 +227,8 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
227 u16 eeprom_ver; 227 u16 eeprom_ver;
228 u16 calib_ver; 228 u16 calib_ver;
229 229
230 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION); 230 eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
231 calib_ver = iwlagn_eeprom_calib_version(priv); 231 calib_ver = iwl_eeprom_calib_version(priv->shrd);
232 232
233 if (eeprom_ver < priv->cfg->eeprom_ver || 233 if (eeprom_ver < priv->cfg->eeprom_ver ||
234 calib_ver < priv->cfg->eeprom_calib_ver) 234 calib_ver < priv->cfg->eeprom_calib_ver)
@@ -249,11 +249,12 @@ err:
249 249
250int iwl_eeprom_check_sku(struct iwl_priv *priv) 250int iwl_eeprom_check_sku(struct iwl_priv *priv)
251{ 251{
252 struct iwl_shared *shrd = priv->shrd;
252 u16 radio_cfg; 253 u16 radio_cfg;
253 254
254 if (!priv->cfg->sku) { 255 if (!priv->cfg->sku) {
255 /* not using sku overwrite */ 256 /* not using sku overwrite */
256 priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); 257 priv->cfg->sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
257 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE && 258 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
258 !priv->cfg->ht_params) { 259 !priv->cfg->ht_params) {
259 IWL_ERR(priv, "Invalid 11n configuration\n"); 260 IWL_ERR(priv, "Invalid 11n configuration\n");
@@ -269,7 +270,7 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
269 270
270 if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) { 271 if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
271 /* not using .cfg overwrite */ 272 /* not using .cfg overwrite */
272 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 273 radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
273 priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); 274 priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
274 priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); 275 priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
275 if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) { 276 if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
@@ -289,9 +290,9 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
289 return 0; 290 return 0;
290} 291}
291 292
292void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) 293void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac)
293{ 294{
294 const u8 *addr = iwl_eeprom_query_addr(priv, 295 const u8 *addr = iwl_eeprom_query_addr(shrd,
295 EEPROM_MAC_ADDRESS); 296 EEPROM_MAC_ADDRESS);
296 memcpy(mac, addr, ETH_ALEN); 297 memcpy(mac, addr, ETH_ALEN);
297} 298}
@@ -582,6 +583,7 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
582 583
583void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) 584void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
584{ 585{
586 struct iwl_shared *shrd = priv->shrd;
585 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; 587 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
586 int idx, entries; 588 int idx, entries;
587 __le16 *txp_len; 589 __le16 *txp_len;
@@ -590,10 +592,10 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
590 BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); 592 BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
591 593
592 /* the length is in 16-bit words, but we want entries */ 594 /* the length is in 16-bit words, but we want entries */
593 txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS); 595 txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS);
594 entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; 596 entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
595 597
596 txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS); 598 txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS);
597 599
598 for (idx = 0; idx < entries; idx++) { 600 for (idx = 0; idx < entries; idx++) {
599 txp = &txp_array[idx]; 601 txp = &txp_array[idx];
@@ -646,12 +648,13 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
646/** 648/**
647 * iwl_eeprom_init - read EEPROM contents 649 * iwl_eeprom_init - read EEPROM contents
648 * 650 *
649 * Load the EEPROM contents from adapter into priv->eeprom 651 * Load the EEPROM contents from adapter into shrd->eeprom
650 * 652 *
651 * NOTE: This routine uses the non-debug IO access functions. 653 * NOTE: This routine uses the non-debug IO access functions.
652 */ 654 */
653int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) 655int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
654{ 656{
657 struct iwl_shared *shrd = priv->shrd;
655 __le16 *e; 658 __le16 *e;
656 u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP); 659 u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
657 int sz; 660 int sz;
@@ -666,12 +669,12 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
666 /* allocate eeprom */ 669 /* allocate eeprom */
667 sz = priv->cfg->base_params->eeprom_size; 670 sz = priv->cfg->base_params->eeprom_size;
668 IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz); 671 IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
669 priv->eeprom = kzalloc(sz, GFP_KERNEL); 672 shrd->eeprom = kzalloc(sz, GFP_KERNEL);
670 if (!priv->eeprom) { 673 if (!shrd->eeprom) {
671 ret = -ENOMEM; 674 ret = -ENOMEM;
672 goto alloc_err; 675 goto alloc_err;
673 } 676 }
674 e = (__le16 *)priv->eeprom; 677 e = (__le16 *)shrd->eeprom;
675 678
676 iwl_apm_init(priv); 679 iwl_apm_init(priv);
677 680
@@ -746,7 +749,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
746 IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n", 749 IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
747 (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) 750 (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
748 ? "OTP" : "EEPROM", 751 ? "OTP" : "EEPROM",
749 iwl_eeprom_query16(priv, EEPROM_VERSION)); 752 iwl_eeprom_query16(shrd, EEPROM_VERSION));
750 753
751 ret = 0; 754 ret = 0;
752done: 755done:
@@ -754,17 +757,17 @@ done:
754 757
755err: 758err:
756 if (ret) 759 if (ret)
757 iwl_eeprom_free(priv); 760 iwl_eeprom_free(priv->shrd);
758 /* Reset chip to save power until we load uCode during "up". */ 761 /* Reset chip to save power until we load uCode during "up". */
759 iwl_apm_stop(priv); 762 iwl_apm_stop(priv);
760alloc_err: 763alloc_err:
761 return ret; 764 return ret;
762} 765}
763 766
764void iwl_eeprom_free(struct iwl_priv *priv) 767void iwl_eeprom_free(struct iwl_shared *shrd)
765{ 768{
766 kfree(priv->eeprom); 769 kfree(shrd->eeprom);
767 priv->eeprom = NULL; 770 shrd->eeprom = NULL;
768} 771}
769 772
770static void iwl_init_band_reference(const struct iwl_priv *priv, 773static void iwl_init_band_reference(const struct iwl_priv *priv,
@@ -772,49 +775,50 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
772 const struct iwl_eeprom_channel **eeprom_ch_info, 775 const struct iwl_eeprom_channel **eeprom_ch_info,
773 const u8 **eeprom_ch_index) 776 const u8 **eeprom_ch_index)
774{ 777{
778 struct iwl_shared *shrd = priv->shrd;
775 u32 offset = priv->cfg->lib-> 779 u32 offset = priv->cfg->lib->
776 eeprom_ops.regulatory_bands[eep_band - 1]; 780 eeprom_ops.regulatory_bands[eep_band - 1];
777 switch (eep_band) { 781 switch (eep_band) {
778 case 1: /* 2.4GHz band */ 782 case 1: /* 2.4GHz band */
779 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); 783 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
780 *eeprom_ch_info = (struct iwl_eeprom_channel *) 784 *eeprom_ch_info = (struct iwl_eeprom_channel *)
781 iwl_eeprom_query_addr(priv, offset); 785 iwl_eeprom_query_addr(shrd, offset);
782 *eeprom_ch_index = iwl_eeprom_band_1; 786 *eeprom_ch_index = iwl_eeprom_band_1;
783 break; 787 break;
784 case 2: /* 4.9GHz band */ 788 case 2: /* 4.9GHz band */
785 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); 789 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
786 *eeprom_ch_info = (struct iwl_eeprom_channel *) 790 *eeprom_ch_info = (struct iwl_eeprom_channel *)
787 iwl_eeprom_query_addr(priv, offset); 791 iwl_eeprom_query_addr(shrd, offset);
788 *eeprom_ch_index = iwl_eeprom_band_2; 792 *eeprom_ch_index = iwl_eeprom_band_2;
789 break; 793 break;
790 case 3: /* 5.2GHz band */ 794 case 3: /* 5.2GHz band */
791 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); 795 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
792 *eeprom_ch_info = (struct iwl_eeprom_channel *) 796 *eeprom_ch_info = (struct iwl_eeprom_channel *)
793 iwl_eeprom_query_addr(priv, offset); 797 iwl_eeprom_query_addr(shrd, offset);
794 *eeprom_ch_index = iwl_eeprom_band_3; 798 *eeprom_ch_index = iwl_eeprom_band_3;
795 break; 799 break;
796 case 4: /* 5.5GHz band */ 800 case 4: /* 5.5GHz band */
797 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); 801 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
798 *eeprom_ch_info = (struct iwl_eeprom_channel *) 802 *eeprom_ch_info = (struct iwl_eeprom_channel *)
799 iwl_eeprom_query_addr(priv, offset); 803 iwl_eeprom_query_addr(shrd, offset);
800 *eeprom_ch_index = iwl_eeprom_band_4; 804 *eeprom_ch_index = iwl_eeprom_band_4;
801 break; 805 break;
802 case 5: /* 5.7GHz band */ 806 case 5: /* 5.7GHz band */
803 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); 807 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
804 *eeprom_ch_info = (struct iwl_eeprom_channel *) 808 *eeprom_ch_info = (struct iwl_eeprom_channel *)
805 iwl_eeprom_query_addr(priv, offset); 809 iwl_eeprom_query_addr(shrd, offset);
806 *eeprom_ch_index = iwl_eeprom_band_5; 810 *eeprom_ch_index = iwl_eeprom_band_5;
807 break; 811 break;
808 case 6: /* 2.4GHz ht40 channels */ 812 case 6: /* 2.4GHz ht40 channels */
809 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); 813 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
810 *eeprom_ch_info = (struct iwl_eeprom_channel *) 814 *eeprom_ch_info = (struct iwl_eeprom_channel *)
811 iwl_eeprom_query_addr(priv, offset); 815 iwl_eeprom_query_addr(shrd, offset);
812 *eeprom_ch_index = iwl_eeprom_band_6; 816 *eeprom_ch_index = iwl_eeprom_band_6;
813 break; 817 break;
814 case 7: /* 5 GHz ht40 channels */ 818 case 7: /* 5 GHz ht40 channels */
815 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); 819 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
816 *eeprom_ch_info = (struct iwl_eeprom_channel *) 820 *eeprom_ch_info = (struct iwl_eeprom_channel *)
817 iwl_eeprom_query_addr(priv, offset); 821 iwl_eeprom_query_addr(shrd, offset);
818 *eeprom_ch_index = iwl_eeprom_band_7; 822 *eeprom_ch_index = iwl_eeprom_band_7;
819 break; 823 break;
820 default: 824 default:
@@ -1064,7 +1068,7 @@ void iwl_rf_config(struct iwl_priv *priv)
1064{ 1068{
1065 u16 radio_cfg; 1069 u16 radio_cfg;
1066 1070
1067 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 1071 radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG);
1068 1072
1069 /* write radio config values to register */ 1073 /* write radio config values to register */
1070 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) { 1074 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index c94747e7299e..9fa937ec35e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -66,6 +66,7 @@
66#include <net/mac80211.h> 66#include <net/mac80211.h>
67 67
68struct iwl_priv; 68struct iwl_priv;
69struct iwl_shared;
69 70
70/* 71/*
71 * EEPROM access time values: 72 * EEPROM access time values:
@@ -305,11 +306,11 @@ struct iwl_eeprom_ops {
305 306
306 307
307int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev); 308int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
308void iwl_eeprom_free(struct iwl_priv *priv); 309void iwl_eeprom_free(struct iwl_shared *shrd);
309int iwl_eeprom_check_version(struct iwl_priv *priv); 310int iwl_eeprom_check_version(struct iwl_priv *priv);
310int iwl_eeprom_check_sku(struct iwl_priv *priv); 311int iwl_eeprom_check_sku(struct iwl_priv *priv);
311const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset); 312const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset);
312u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset); 313u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset);
313int iwl_init_channel_map(struct iwl_priv *priv); 314int iwl_init_channel_map(struct iwl_priv *priv);
314void iwl_free_channel_map(struct iwl_priv *priv); 315void iwl_free_channel_map(struct iwl_priv *priv);
315const struct iwl_channel_info *iwl_get_channel_info( 316const struct iwl_channel_info *iwl_get_channel_info(
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3ffa8e62b856..3464cad7e38c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -143,7 +143,7 @@ u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
143 143
144 spin_lock_irqsave(&bus->reg_lock, flags); 144 spin_lock_irqsave(&bus->reg_lock, flags);
145 iwl_grab_nic_access(bus); 145 iwl_grab_nic_access(bus);
146 value = iwl_read32(bus(bus), reg); 146 value = iwl_read32(bus, reg);
147 iwl_release_nic_access(bus); 147 iwl_release_nic_access(bus);
148 spin_unlock_irqrestore(&bus->reg_lock, flags); 148 spin_unlock_irqrestore(&bus->reg_lock, flags);
149 149
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index 05b1f0d2f387..e3944f4e4fd6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -427,7 +427,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
427 iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR, 427 iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
428 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); 428 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
429 429
430 base = priv->device_pointers.error_event_table; 430 base = priv->shrd->device_pointers.error_event_table;
431 if (iwlagn_hw_valid_rtc_data_addr(base)) { 431 if (iwlagn_hw_valid_rtc_data_addr(base)) {
432 spin_lock_irqsave(&bus(priv)->reg_lock, flags); 432 spin_lock_irqsave(&bus(priv)->reg_lock, flags);
433 ret = iwl_grab_nic_access_silent(bus(priv)); 433 ret = iwl_grab_nic_access_silent(bus(priv));
@@ -481,15 +481,11 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
481{ 481{
482 struct iwl_priv *priv = hw->priv; 482 struct iwl_priv *priv = hw->priv;
483 483
484 IWL_DEBUG_MACDUMP(priv, "enter\n");
485
486 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 484 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
487 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 485 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
488 486
489 if (iwlagn_tx_skb(priv, skb)) 487 if (iwlagn_tx_skb(priv, skb))
490 dev_kfree_skb_any(skb); 488 dev_kfree_skb_any(skb);
491
492 IWL_DEBUG_MACDUMP(priv, "leave\n");
493} 489}
494 490
495static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, 491static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -521,6 +517,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
521 return -EOPNOTSUPP; 517 return -EOPNOTSUPP;
522 } 518 }
523 519
520 switch (key->cipher) {
521 case WLAN_CIPHER_SUITE_TKIP:
522 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
523 /* fall through */
524 case WLAN_CIPHER_SUITE_CCMP:
525 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
526 break;
527 default:
528 break;
529 }
530
524 /* 531 /*
525 * We could program these keys into the hardware as well, but we 532 * We could program these keys into the hardware as well, but we
526 * don't expect much multicast traffic in IBSS and having keys 533 * don't expect much multicast traffic in IBSS and having keys
@@ -804,21 +811,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
804 811
805 /* Configure HT40 channels */ 812 /* Configure HT40 channels */
806 ctx->ht.enabled = conf_is_ht(conf); 813 ctx->ht.enabled = conf_is_ht(conf);
807 if (ctx->ht.enabled) { 814 if (ctx->ht.enabled)
808 if (conf_is_ht40_minus(conf)) { 815 iwlagn_config_ht40(conf, ctx);
809 ctx->ht.extension_chan_offset = 816 else
810 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
811 ctx->ht.is_40mhz = true;
812 } else if (conf_is_ht40_plus(conf)) {
813 ctx->ht.extension_chan_offset =
814 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
815 ctx->ht.is_40mhz = true;
816 } else {
817 ctx->ht.extension_chan_offset =
818 IEEE80211_HT_PARAM_CHA_SEC_NONE;
819 ctx->ht.is_40mhz = false;
820 }
821 } else
822 ctx->ht.is_40mhz = false; 817 ctx->ht.is_40mhz = false;
823 818
824 if ((le16_to_cpu(ctx->staging.channel) != ch)) 819 if ((le16_to_cpu(ctx->staging.channel) != ch))
@@ -1053,6 +1048,9 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
1053 int ret; 1048 int ret;
1054 u8 sta_id; 1049 u8 sta_id;
1055 1050
1051 if (ctx->ctxid != IWL_RXON_CTX_PAN)
1052 return 0;
1053
1056 IWL_DEBUG_MAC80211(priv, "enter\n"); 1054 IWL_DEBUG_MAC80211(priv, "enter\n");
1057 mutex_lock(&priv->shrd->mutex); 1055 mutex_lock(&priv->shrd->mutex);
1058 1056
@@ -1102,6 +1100,9 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
1102 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1100 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1103 struct iwl_rxon_context *ctx = vif_priv->ctx; 1101 struct iwl_rxon_context *ctx = vif_priv->ctx;
1104 1102
1103 if (ctx->ctxid != IWL_RXON_CTX_PAN)
1104 return;
1105
1105 IWL_DEBUG_MAC80211(priv, "enter\n"); 1106 IWL_DEBUG_MAC80211(priv, "enter\n");
1106 mutex_lock(&priv->shrd->mutex); 1107 mutex_lock(&priv->shrd->mutex);
1107 1108
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index 86d6a2354e8a..850ec8e51b17 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -60,6 +60,7 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#include <linux/module.h>
63#include <linux/pci.h> 64#include <linux/pci.h>
64#include <linux/pci-aspm.h> 65#include <linux/pci-aspm.h>
65 66
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index 1f7a93c67c45..29a7284aa3ef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -97,6 +97,7 @@
97struct iwl_cfg; 97struct iwl_cfg;
98struct iwl_bus; 98struct iwl_bus;
99struct iwl_priv; 99struct iwl_priv;
100struct iwl_trans;
100struct iwl_sensitivity_ranges; 101struct iwl_sensitivity_ranges;
101struct iwl_trans_ops; 102struct iwl_trans_ops;
102 103
@@ -120,7 +121,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
120 * @restart_fw: restart firmware, default = 1 121 * @restart_fw: restart firmware, default = 1
121 * @plcp_check: enable plcp health check, default = true 122 * @plcp_check: enable plcp health check, default = true
122 * @ack_check: disable ack health check, default = false 123 * @ack_check: disable ack health check, default = false
123 * @wd_disable: enable stuck queue check, default = false 124 * @wd_disable: enable stuck queue check, default = 0
124 * @bt_coex_active: enable bt coex, default = true 125 * @bt_coex_active: enable bt coex, default = true
125 * @led_mode: system default, default = 0 126 * @led_mode: system default, default = 0
126 * @no_sleep_autoadjust: disable autoadjust, default = true 127 * @no_sleep_autoadjust: disable autoadjust, default = true
@@ -141,7 +142,7 @@ struct iwl_mod_params {
141 int restart_fw; 142 int restart_fw;
142 bool plcp_check; 143 bool plcp_check;
143 bool ack_check; 144 bool ack_check;
144 bool wd_disable; 145 int wd_disable;
145 bool bt_coex_active; 146 bool bt_coex_active;
146 int led_mode; 147 int led_mode;
147 bool no_sleep_autoadjust; 148 bool no_sleep_autoadjust;
@@ -174,7 +175,6 @@ struct iwl_mod_params {
174 * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit 175 * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
175 * relevant for 1000, 6000 and up 176 * relevant for 1000, 6000 and up
176 * @wd_timeout: TX queues watchdog timeout 177 * @wd_timeout: TX queues watchdog timeout
177 * @calib_init_cfg: setup initial calibrations for the hw
178 * @calib_rt_cfg: setup runtime calibrations for the hw 178 * @calib_rt_cfg: setup runtime calibrations for the hw
179 * @struct iwl_sensitivity_ranges: range of sensitivity values 179 * @struct iwl_sensitivity_ranges: range of sensitivity values
180 */ 180 */
@@ -195,7 +195,6 @@ struct iwl_hw_params {
195 u32 ct_kill_exit_threshold; 195 u32 ct_kill_exit_threshold;
196 unsigned int wd_timeout; 196 unsigned int wd_timeout;
197 197
198 u32 calib_init_cfg;
199 u32 calib_rt_cfg; 198 u32 calib_rt_cfg;
200 const struct iwl_sensitivity_ranges *sens; 199 const struct iwl_sensitivity_ranges *sens;
201}; 200};
@@ -259,6 +258,52 @@ struct iwl_tid_data {
259}; 258};
260 259
261/** 260/**
261 * enum iwl_ucode_type
262 *
263 * The type of ucode currently loaded on the hardware.
264 *
265 * @IWL_UCODE_NONE: No ucode loaded
266 * @IWL_UCODE_REGULAR: Normal runtime ucode
267 * @IWL_UCODE_INIT: Initial ucode
268 * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
269 */
270enum iwl_ucode_type {
271 IWL_UCODE_NONE,
272 IWL_UCODE_REGULAR,
273 IWL_UCODE_INIT,
274 IWL_UCODE_WOWLAN,
275};
276
277/**
278 * struct iwl_notification_wait - notification wait entry
279 * @list: list head for global list
280 * @fn: function called with the notification
281 * @cmd: command ID
282 *
283 * This structure is not used directly, to wait for a
284 * notification declare it on the stack, and call
285 * iwlagn_init_notification_wait() with appropriate
286 * parameters. Then do whatever will cause the ucode
287 * to notify the driver, and to wait for that then
288 * call iwlagn_wait_notification().
289 *
290 * Each notification is one-shot. If at some point we
291 * need to support multi-shot notifications (which
292 * can't be allocated on the stack) we need to modify
293 * the code for them.
294 */
295struct iwl_notification_wait {
296 struct list_head list;
297
298 void (*fn)(struct iwl_trans *trans, struct iwl_rx_packet *pkt,
299 void *data);
300 void *fn_data;
301
302 u8 cmd;
303 bool triggered, aborted;
304};
305
306/**
262 * struct iwl_shared - shared fields for all the layers of the driver 307 * struct iwl_shared - shared fields for all the layers of the driver
263 * 308 *
264 * @dbg_level_dev: dbg level set per device. Prevails on 309 * @dbg_level_dev: dbg level set per device. Prevails on
@@ -275,6 +320,11 @@ struct iwl_tid_data {
275 * @sta_lock: protects the station table. 320 * @sta_lock: protects the station table.
276 * If lock and sta_lock are needed, lock must be acquired first. 321 * If lock and sta_lock are needed, lock must be acquired first.
277 * @mutex: 322 * @mutex:
323 * @ucode_type: indicator of loaded ucode image
324 * @notif_waits: things waiting for notification
325 * @notif_wait_lock: lock protecting notification
326 * @notif_waitq: head of notification wait queue
327 * @device_pointers: pointers to ucode event tables
278 */ 328 */
279struct iwl_shared { 329struct iwl_shared {
280#ifdef CONFIG_IWLWIFI_DEBUG 330#ifdef CONFIG_IWLWIFI_DEBUG
@@ -302,6 +352,23 @@ struct iwl_shared {
302 struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT]; 352 struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
303 353
304 wait_queue_head_t wait_command_queue; 354 wait_queue_head_t wait_command_queue;
355
356 /* eeprom -- this is in the card's little endian byte order */
357 u8 *eeprom;
358
359 /* ucode related variables */
360 enum iwl_ucode_type ucode_type;
361
362 /* notification wait support */
363 struct list_head notif_waits;
364 spinlock_t notif_wait_lock;
365 wait_queue_head_t notif_waitq;
366
367 struct {
368 u32 error_event_table;
369 u32 log_event_table;
370 } device_pointers;
371
305}; 372};
306 373
307/*Whatever _m is (iwl_trans, iwl_priv, iwl_bus, these macros will work */ 374/*Whatever _m is (iwl_trans, iwl_priv, iwl_bus, these macros will work */
@@ -445,6 +512,24 @@ bool iwl_check_for_ct_kill(struct iwl_priv *priv);
445void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac); 512void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac);
446void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac); 513void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac);
447 514
515/* notification wait support */
516void iwl_abort_notification_waits(struct iwl_shared *shrd);
517void __acquires(wait_entry)
518iwl_init_notification_wait(struct iwl_shared *shrd,
519 struct iwl_notification_wait *wait_entry,
520 u8 cmd,
521 void (*fn)(struct iwl_trans *trans,
522 struct iwl_rx_packet *pkt,
523 void *data),
524 void *fn_data);
525int __must_check __releases(wait_entry)
526iwl_wait_notification(struct iwl_shared *shrd,
527 struct iwl_notification_wait *wait_entry,
528 unsigned long timeout);
529void __releases(wait_entry)
530iwl_remove_notification(struct iwl_shared *shrd,
531 struct iwl_notification_wait *wait_entry);
532
448#ifdef CONFIG_IWLWIFI_DEBUGFS 533#ifdef CONFIG_IWLWIFI_DEBUGFS
449void iwl_reset_traffic_log(struct iwl_priv *priv); 534void iwl_reset_traffic_log(struct iwl_priv *priv);
450#endif /* CONFIG_IWLWIFI_DEBUGFS */ 535#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index e3882d0cfc85..a874eb7b5f8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -77,6 +77,7 @@
77#include "iwl-agn.h" 77#include "iwl-agn.h"
78#include "iwl-testmode.h" 78#include "iwl-testmode.h"
79#include "iwl-trans.h" 79#include "iwl-trans.h"
80#include "iwl-bus.h"
80 81
81/* The TLVs used in the gnl message policy between the kernel module and 82/* The TLVs used in the gnl message policy between the kernel module and
82 * user space application. iwl_testmode_gnl_msg_policy is to be carried 83 * user space application. iwl_testmode_gnl_msg_policy is to be carried
@@ -106,6 +107,13 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
106 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, 107 [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
107 108
108 [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, }, 109 [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
110
111 [IWL_TM_ATTR_SRAM_ADDR] = { .type = NLA_U32, },
112 [IWL_TM_ATTR_SRAM_SIZE] = { .type = NLA_U32, },
113 [IWL_TM_ATTR_SRAM_DUMP] = { .type = NLA_UNSPEC, },
114
115 [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
116 [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
109}; 117};
110 118
111/* 119/*
@@ -177,6 +185,18 @@ void iwl_testmode_init(struct iwl_priv *priv)
177{ 185{
178 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt; 186 priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
179 priv->testmode_trace.trace_enabled = false; 187 priv->testmode_trace.trace_enabled = false;
188 priv->testmode_sram.sram_readed = false;
189}
190
191static void iwl_sram_cleanup(struct iwl_priv *priv)
192{
193 if (priv->testmode_sram.sram_readed) {
194 kfree(priv->testmode_sram.buff_addr);
195 priv->testmode_sram.buff_addr = NULL;
196 priv->testmode_sram.buff_size = 0;
197 priv->testmode_sram.num_chunks = 0;
198 priv->testmode_sram.sram_readed = false;
199 }
180} 200}
181 201
182static void iwl_trace_cleanup(struct iwl_priv *priv) 202static void iwl_trace_cleanup(struct iwl_priv *priv)
@@ -201,6 +221,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
201void iwl_testmode_cleanup(struct iwl_priv *priv) 221void iwl_testmode_cleanup(struct iwl_priv *priv)
202{ 222{
203 iwl_trace_cleanup(priv); 223 iwl_trace_cleanup(priv);
224 iwl_sram_cleanup(priv);
204} 225}
205 226
206/* 227/*
@@ -276,7 +297,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
276 IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs); 297 IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
277 298
278 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { 299 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
279 case IWL_TM_CMD_APP2DEV_REG_READ32: 300 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
280 val32 = iwl_read32(bus(priv), ofs); 301 val32 = iwl_read32(bus(priv), ofs);
281 IWL_INFO(priv, "32bit value to read 0x%x\n", val32); 302 IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
282 303
@@ -291,7 +312,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
291 IWL_DEBUG_INFO(priv, 312 IWL_DEBUG_INFO(priv,
292 "Error sending msg : %d\n", status); 313 "Error sending msg : %d\n", status);
293 break; 314 break;
294 case IWL_TM_CMD_APP2DEV_REG_WRITE32: 315 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
295 if (!tb[IWL_TM_ATTR_REG_VALUE32]) { 316 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
296 IWL_DEBUG_INFO(priv, 317 IWL_DEBUG_INFO(priv,
297 "Error finding value to write\n"); 318 "Error finding value to write\n");
@@ -302,7 +323,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
302 iwl_write32(bus(priv), ofs, val32); 323 iwl_write32(bus(priv), ofs, val32);
303 } 324 }
304 break; 325 break;
305 case IWL_TM_CMD_APP2DEV_REG_WRITE8: 326 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
306 if (!tb[IWL_TM_ATTR_REG_VALUE8]) { 327 if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
307 IWL_DEBUG_INFO(priv, "Error finding value to write\n"); 328 IWL_DEBUG_INFO(priv, "Error finding value to write\n");
308 return -ENOMSG; 329 return -ENOMSG;
@@ -312,6 +333,32 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
312 iwl_write8(bus(priv), ofs, val8); 333 iwl_write8(bus(priv), ofs, val8);
313 } 334 }
314 break; 335 break;
336 case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
337 val32 = iwl_read_prph(bus(priv), ofs);
338 IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
339
340 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
341 if (!skb) {
342 IWL_DEBUG_INFO(priv, "Error allocating memory\n");
343 return -ENOMEM;
344 }
345 NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
346 status = cfg80211_testmode_reply(skb);
347 if (status < 0)
348 IWL_DEBUG_INFO(priv,
349 "Error sending msg : %d\n", status);
350 break;
351 case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
352 if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
353 IWL_DEBUG_INFO(priv,
354 "Error finding value to write\n");
355 return -ENOMSG;
356 } else {
357 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
358 IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
359 iwl_write_prph(bus(priv), ofs, val32);
360 }
361 break;
315 default: 362 default:
316 IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n"); 363 IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
317 return -ENOSYS; 364 return -ENOSYS;
@@ -330,7 +377,7 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
330 struct iwl_notification_wait calib_wait; 377 struct iwl_notification_wait calib_wait;
331 int ret; 378 int ret;
332 379
333 iwlagn_init_notification_wait(priv, &calib_wait, 380 iwl_init_notification_wait(priv->shrd, &calib_wait,
334 CALIBRATION_COMPLETE_NOTIFICATION, 381 CALIBRATION_COMPLETE_NOTIFICATION,
335 NULL, NULL); 382 NULL, NULL);
336 ret = iwlagn_init_alive_start(priv); 383 ret = iwlagn_init_alive_start(priv);
@@ -340,14 +387,14 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
340 goto cfg_init_calib_error; 387 goto cfg_init_calib_error;
341 } 388 }
342 389
343 ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ); 390 ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
344 if (ret) 391 if (ret)
345 IWL_DEBUG_INFO(priv, "Error detecting" 392 IWL_DEBUG_INFO(priv, "Error detecting"
346 " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret); 393 " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
347 return ret; 394 return ret;
348 395
349cfg_init_calib_error: 396cfg_init_calib_error:
350 iwlagn_remove_notification(priv, &calib_wait); 397 iwl_remove_notification(priv->shrd, &calib_wait);
351 return ret; 398 return ret;
352} 399}
353 400
@@ -373,6 +420,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
373 struct sk_buff *skb; 420 struct sk_buff *skb;
374 unsigned char *rsp_data_ptr = NULL; 421 unsigned char *rsp_data_ptr = NULL;
375 int status = 0, rsp_data_len = 0; 422 int status = 0, rsp_data_len = 0;
423 char buf[32], *ptr = NULL;
424 unsigned int num, devid;
376 425
377 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { 426 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
378 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME: 427 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
@@ -420,8 +469,23 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
420 "Error starting the device: %d\n", status); 469 "Error starting the device: %d\n", status);
421 break; 470 break;
422 471
472 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
473 iwl_scan_cancel_timeout(priv, 200);
474 iwl_trans_stop_device(trans(priv));
475 status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
476 if (status) {
477 IWL_DEBUG_INFO(priv,
478 "Error loading WOWLAN ucode: %d\n", status);
479 break;
480 }
481 status = iwl_alive_start(priv);
482 if (status)
483 IWL_DEBUG_INFO(priv,
484 "Error starting the device: %d\n", status);
485 break;
486
423 case IWL_TM_CMD_APP2DEV_GET_EEPROM: 487 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
424 if (priv->eeprom) { 488 if (priv->shrd->eeprom) {
425 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 489 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
426 priv->cfg->base_params->eeprom_size + 20); 490 priv->cfg->base_params->eeprom_size + 20);
427 if (!skb) { 491 if (!skb) {
@@ -433,7 +497,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
433 IWL_TM_CMD_DEV2APP_EEPROM_RSP); 497 IWL_TM_CMD_DEV2APP_EEPROM_RSP);
434 NLA_PUT(skb, IWL_TM_ATTR_EEPROM, 498 NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
435 priv->cfg->base_params->eeprom_size, 499 priv->cfg->base_params->eeprom_size,
436 priv->eeprom); 500 priv->shrd->eeprom);
437 status = cfg80211_testmode_reply(skb); 501 status = cfg80211_testmode_reply(skb);
438 if (status < 0) 502 if (status < 0)
439 IWL_DEBUG_INFO(priv, 503 IWL_DEBUG_INFO(priv,
@@ -452,6 +516,43 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
452 priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]); 516 priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
453 break; 517 break;
454 518
519 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
520 IWL_INFO(priv, "uCode version raw: 0x%x\n", priv->ucode_ver);
521
522 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
523 if (!skb) {
524 IWL_DEBUG_INFO(priv, "Error allocating memory\n");
525 return -ENOMEM;
526 }
527 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, priv->ucode_ver);
528 status = cfg80211_testmode_reply(skb);
529 if (status < 0)
530 IWL_DEBUG_INFO(priv,
531 "Error sending msg : %d\n", status);
532 break;
533
534 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
535 bus_get_hw_id(bus(priv), buf, sizeof(buf));
536 ptr = buf;
537 strsep(&ptr, ":");
538 sscanf(strsep(&ptr, ":"), "%x", &num);
539 sscanf(strsep(&ptr, ":"), "%x", &devid);
540 IWL_INFO(priv, "Device ID = 0x%04x, SubDevice ID= 0x%04x\n",
541 num, devid);
542 devid |= (num << 16);
543
544 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
545 if (!skb) {
546 IWL_DEBUG_INFO(priv, "Error allocating memory\n");
547 return -ENOMEM;
548 }
549 NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid);
550 status = cfg80211_testmode_reply(skb);
551 if (status < 0)
552 IWL_DEBUG_INFO(priv,
553 "Error sending msg : %d\n", status);
554 break;
555
455 default: 556 default:
456 IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n"); 557 IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
457 return -ENOSYS; 558 return -ENOSYS;
@@ -532,7 +633,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
532 } 633 }
533 priv->testmode_trace.num_chunks = 634 priv->testmode_trace.num_chunks =
534 DIV_ROUND_UP(priv->testmode_trace.buff_size, 635 DIV_ROUND_UP(priv->testmode_trace.buff_size,
535 TRACE_CHUNK_SIZE); 636 DUMP_CHUNK_SIZE);
536 break; 637 break;
537 638
538 case IWL_TM_CMD_APP2DEV_END_TRACE: 639 case IWL_TM_CMD_APP2DEV_END_TRACE:
@@ -564,15 +665,15 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
564 idx = cb->args[4]; 665 idx = cb->args[4];
565 if (idx >= priv->testmode_trace.num_chunks) 666 if (idx >= priv->testmode_trace.num_chunks)
566 return -ENOENT; 667 return -ENOENT;
567 length = TRACE_CHUNK_SIZE; 668 length = DUMP_CHUNK_SIZE;
568 if (((idx + 1) == priv->testmode_trace.num_chunks) && 669 if (((idx + 1) == priv->testmode_trace.num_chunks) &&
569 (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE)) 670 (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
570 length = priv->testmode_trace.buff_size % 671 length = priv->testmode_trace.buff_size %
571 TRACE_CHUNK_SIZE; 672 DUMP_CHUNK_SIZE;
572 673
573 NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length, 674 NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
574 priv->testmode_trace.trace_addr + 675 priv->testmode_trace.trace_addr +
575 (TRACE_CHUNK_SIZE * idx)); 676 (DUMP_CHUNK_SIZE * idx));
576 idx++; 677 idx++;
577 cb->args[4] = idx; 678 cb->args[4] = idx;
578 return 0; 679 return 0;
@@ -618,6 +719,110 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
618 return 0; 719 return 0;
619} 720}
620 721
722/*
723 * This function handles the user application commands for SRAM data dump
724 *
725 * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
726 * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading
727 *
728 * Several error will be retured, -EBUSY if the SRAM data retrieved by
729 * previous command has not been delivered to userspace, or -ENOMSG if
730 * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR,IWL_TM_ATTR_SRAM_SIZE)
731 * are missing, or -ENOMEM if the buffer allocation fails.
732 *
733 * Otherwise 0 is replied indicating the success of the SRAM reading.
734 *
735 * @hw: ieee80211_hw object that represents the device
736 * @tb: gnl message fields from the user space
737 */
738static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb)
739{
740 struct iwl_priv *priv = hw->priv;
741 u32 base, ofs, size, maxsize;
742
743 if (priv->testmode_sram.sram_readed)
744 return -EBUSY;
745
746 if (!tb[IWL_TM_ATTR_SRAM_ADDR]) {
747 IWL_DEBUG_INFO(priv, "Error finding SRAM offset address\n");
748 return -ENOMSG;
749 }
750 ofs = nla_get_u32(tb[IWL_TM_ATTR_SRAM_ADDR]);
751 if (!tb[IWL_TM_ATTR_SRAM_SIZE]) {
752 IWL_DEBUG_INFO(priv, "Error finding size for SRAM reading\n");
753 return -ENOMSG;
754 }
755 size = nla_get_u32(tb[IWL_TM_ATTR_SRAM_SIZE]);
756 switch (priv->shrd->ucode_type) {
757 case IWL_UCODE_REGULAR:
758 maxsize = trans(priv)->ucode_rt.data.len;
759 break;
760 case IWL_UCODE_INIT:
761 maxsize = trans(priv)->ucode_init.data.len;
762 break;
763 case IWL_UCODE_WOWLAN:
764 maxsize = trans(priv)->ucode_wowlan.data.len;
765 break;
766 case IWL_UCODE_NONE:
767 IWL_DEBUG_INFO(priv, "Error, uCode does not been loaded\n");
768 return -ENOSYS;
769 default:
770 IWL_DEBUG_INFO(priv, "Error, unsupported uCode type\n");
771 return -ENOSYS;
772 }
773 if ((ofs + size) > maxsize) {
774 IWL_DEBUG_INFO(priv, "Invalid offset/size: out of range\n");
775 return -EINVAL;
776 }
777 priv->testmode_sram.buff_size = (size / 4) * 4;
778 priv->testmode_sram.buff_addr =
779 kmalloc(priv->testmode_sram.buff_size, GFP_KERNEL);
780 if (priv->testmode_sram.buff_addr == NULL) {
781 IWL_DEBUG_INFO(priv, "Error allocating memory\n");
782 return -ENOMEM;
783 }
784 base = 0x800000;
785 _iwl_read_targ_mem_words(bus(priv), base + ofs,
786 priv->testmode_sram.buff_addr,
787 priv->testmode_sram.buff_size / 4);
788 priv->testmode_sram.num_chunks =
789 DIV_ROUND_UP(priv->testmode_sram.buff_size, DUMP_CHUNK_SIZE);
790 priv->testmode_sram.sram_readed = true;
791 return 0;
792}
793
794static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
795 struct sk_buff *skb,
796 struct netlink_callback *cb)
797{
798 struct iwl_priv *priv = hw->priv;
799 int idx, length;
800
801 if (priv->testmode_sram.sram_readed) {
802 idx = cb->args[4];
803 if (idx >= priv->testmode_sram.num_chunks) {
804 iwl_sram_cleanup(priv);
805 return -ENOENT;
806 }
807 length = DUMP_CHUNK_SIZE;
808 if (((idx + 1) == priv->testmode_sram.num_chunks) &&
809 (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE))
810 length = priv->testmode_sram.buff_size %
811 DUMP_CHUNK_SIZE;
812
813 NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length,
814 priv->testmode_sram.buff_addr +
815 (DUMP_CHUNK_SIZE * idx));
816 idx++;
817 cb->args[4] = idx;
818 return 0;
819 } else
820 return -EFAULT;
821
822 nla_put_failure:
823 return -ENOBUFS;
824}
825
621 826
622/* The testmode gnl message handler that takes the gnl message from the 827/* The testmode gnl message handler that takes the gnl message from the
623 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then 828 * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
@@ -665,9 +870,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
665 IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n"); 870 IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
666 result = iwl_testmode_ucode(hw, tb); 871 result = iwl_testmode_ucode(hw, tb);
667 break; 872 break;
668 case IWL_TM_CMD_APP2DEV_REG_READ32: 873 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
669 case IWL_TM_CMD_APP2DEV_REG_WRITE32: 874 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
670 case IWL_TM_CMD_APP2DEV_REG_WRITE8: 875 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
876 case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
877 case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
671 IWL_DEBUG_INFO(priv, "testmode cmd to register\n"); 878 IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
672 result = iwl_testmode_reg(hw, tb); 879 result = iwl_testmode_reg(hw, tb);
673 break; 880 break;
@@ -677,6 +884,9 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
677 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: 884 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
678 case IWL_TM_CMD_APP2DEV_GET_EEPROM: 885 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
679 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ: 886 case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
887 case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
888 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
889 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
680 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n"); 890 IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
681 result = iwl_testmode_driver(hw, tb); 891 result = iwl_testmode_driver(hw, tb);
682 break; 892 break;
@@ -693,6 +903,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
693 result = iwl_testmode_ownership(hw, tb); 903 result = iwl_testmode_ownership(hw, tb);
694 break; 904 break;
695 905
906 case IWL_TM_CMD_APP2DEV_READ_SRAM:
907 IWL_DEBUG_INFO(priv, "testmode sram read cmd to driver\n");
908 result = iwl_testmode_sram(hw, tb);
909 break;
910
696 default: 911 default:
697 IWL_DEBUG_INFO(priv, "Unknown testmode command\n"); 912 IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
698 result = -ENOSYS; 913 result = -ENOSYS;
@@ -741,6 +956,10 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
741 IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n"); 956 IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
742 result = iwl_testmode_trace_dump(hw, tb, skb, cb); 957 result = iwl_testmode_trace_dump(hw, tb, skb, cb);
743 break; 958 break;
959 case IWL_TM_CMD_APP2DEV_DUMP_SRAM:
960 IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
961 result = iwl_testmode_sram_dump(hw, tb, skb, cb);
962 break;
744 default: 963 default:
745 result = -EINVAL; 964 result = -EINVAL;
746 break; 965 break;
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index b980bda4b0f8..26138f110340 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -76,9 +76,9 @@
76 * the actual uCode host command ID is carried with 76 * the actual uCode host command ID is carried with
77 * IWL_TM_ATTR_UCODE_CMD_ID 77 * IWL_TM_ATTR_UCODE_CMD_ID
78 * 78 *
79 * @IWL_TM_CMD_APP2DEV_REG_READ32: 79 * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
80 * @IWL_TM_CMD_APP2DEV_REG_WRITE32: 80 * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
81 * @IWL_TM_CMD_APP2DEV_REG_WRITE8: 81 * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
82 * commands from user applicaiton to access register 82 * commands from user applicaiton to access register
83 * 83 *
84 * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name 84 * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
@@ -103,16 +103,30 @@
103 * @IWL_TM_CMD_DEV2APP_EEPROM_RSP: 103 * @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
104 * commands from kernel space to carry the eeprom response 104 * commands from kernel space to carry the eeprom response
105 * to user application 105 * to user application
106 *
106 * @IWL_TM_CMD_APP2DEV_OWNERSHIP: 107 * @IWL_TM_CMD_APP2DEV_OWNERSHIP:
107 * commands from user application to own change the ownership of the uCode 108 * commands from user application to own change the ownership of the uCode
108 * if application has the ownership, the only host command from 109 * if application has the ownership, the only host command from
109 * testmode will deliver to uCode. Default owner is driver 110 * testmode will deliver to uCode. Default owner is driver
111 *
112 * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
113 * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
114 * commands from user applicaiton to indirectly access peripheral register
115 *
116 * @IWL_TM_CMD_APP2DEV_READ_SRAM:
117 * @IWL_TM_CMD_APP2DEV_DUMP_SRAM:
118 * commands from user applicaiton to read data in sram
119 *
120 * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Weak On Wireless LAN uCode image
121 * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve uCode version
122 * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve ID information in device
123 *
110 */ 124 */
111enum iwl_tm_cmd_t { 125enum iwl_tm_cmd_t {
112 IWL_TM_CMD_APP2DEV_UCODE = 1, 126 IWL_TM_CMD_APP2DEV_UCODE = 1,
113 IWL_TM_CMD_APP2DEV_REG_READ32 = 2, 127 IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2,
114 IWL_TM_CMD_APP2DEV_REG_WRITE32 = 3, 128 IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3,
115 IWL_TM_CMD_APP2DEV_REG_WRITE8 = 4, 129 IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4,
116 IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5, 130 IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5,
117 IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6, 131 IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6,
118 IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7, 132 IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7,
@@ -126,7 +140,14 @@ enum iwl_tm_cmd_t {
126 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15, 140 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
127 IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16, 141 IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
128 IWL_TM_CMD_APP2DEV_OWNERSHIP = 17, 142 IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
129 IWL_TM_CMD_MAX = 18, 143 IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32 = 18,
144 IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32 = 19,
145 IWL_TM_CMD_APP2DEV_READ_SRAM = 20,
146 IWL_TM_CMD_APP2DEV_DUMP_SRAM = 21,
147 IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22,
148 IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23,
149 IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24,
150 IWL_TM_CMD_MAX = 25,
130}; 151};
131 152
132/* 153/*
@@ -196,6 +217,26 @@ enum iwl_tm_cmd_t {
196 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP, 217 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
197 * The mandatory fields are: 218 * The mandatory fields are:
198 * IWL_TM_ATTR_UCODE_OWNER for the new owner 219 * IWL_TM_ATTR_UCODE_OWNER for the new owner
220 *
221 * @IWL_TM_ATTR_SRAM_ADDR:
222 * @IWL_TM_ATTR_SRAM_SIZE:
223 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_READ_SRAM,
224 * The mandatory fields are:
225 * IWL_TM_ATTR_SRAM_ADDR for the address in sram
226 * IWL_TM_ATTR_SRAM_SIZE for the buffer size of data reading
227 *
228 * @IWL_TM_ATTR_SRAM_DUMP:
229 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_DUMP_SRAM,
230 * IWL_TM_ATTR_SRAM_DUMP for the data in sram
231 *
232 * @IWL_TM_ATTR_FW_VERSION:
233 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
234 * IWL_TM_ATTR_FW_VERSION for the uCode version
235 *
236 * @IWL_TM_ATTR_DEVICE_ID:
237 * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
238 * IWL_TM_ATTR_DEVICE_ID for the device ID information
239 *
199 */ 240 */
200enum iwl_tm_attr_t { 241enum iwl_tm_attr_t {
201 IWL_TM_ATTR_NOT_APPLICABLE = 0, 242 IWL_TM_ATTR_NOT_APPLICABLE = 0,
@@ -213,7 +254,12 @@ enum iwl_tm_attr_t {
213 IWL_TM_ATTR_TRACE_DUMP = 12, 254 IWL_TM_ATTR_TRACE_DUMP = 12,
214 IWL_TM_ATTR_FIXRATE = 13, 255 IWL_TM_ATTR_FIXRATE = 13,
215 IWL_TM_ATTR_UCODE_OWNER = 14, 256 IWL_TM_ATTR_UCODE_OWNER = 14,
216 IWL_TM_ATTR_MAX = 15, 257 IWL_TM_ATTR_SRAM_ADDR = 15,
258 IWL_TM_ATTR_SRAM_SIZE = 16,
259 IWL_TM_ATTR_SRAM_DUMP = 17,
260 IWL_TM_ATTR_FW_VERSION = 18,
261 IWL_TM_ATTR_DEVICE_ID = 19,
262 IWL_TM_ATTR_MAX = 20,
217}; 263};
218 264
219/* uCode trace buffer */ 265/* uCode trace buffer */
@@ -221,6 +267,8 @@ enum iwl_tm_attr_t {
221#define TRACE_BUFF_SIZE_MIN 0x20000 267#define TRACE_BUFF_SIZE_MIN 0x20000
222#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN 268#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN
223#define TRACE_BUFF_PADD 0x2000 269#define TRACE_BUFF_PADD 0x2000
224#define TRACE_CHUNK_SIZE (PAGE_SIZE - 1024) 270
271/* Maximum data size of each dump it packet */
272#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)
225 273
226#endif 274#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index afaaa2a51b96..5a384b309b09 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -354,6 +354,11 @@ static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
354 txq->swq_id = (hwq << 2) | ac; 354 txq->swq_id = (hwq << 2) | ac;
355} 355}
356 356
357static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
358{
359 return txq->swq_id & 0x3;
360}
361
357static inline void iwl_wake_queue(struct iwl_trans *trans, 362static inline void iwl_wake_queue(struct iwl_trans *trans,
358 struct iwl_tx_queue *txq, const char *msg) 363 struct iwl_tx_queue *txq, const char *msg)
359{ 364{
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index ee126f844a5c..2ee00e0f39d3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -594,8 +594,8 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
594 struct iwl_trans_pcie *trans_pcie = 594 struct iwl_trans_pcie *trans_pcie =
595 IWL_TRANS_GET_PCIE_TRANS(trans); 595 IWL_TRANS_GET_PCIE_TRANS(trans);
596 596
597 base = priv->device_pointers.error_event_table; 597 base = trans->shrd->device_pointers.error_event_table;
598 if (priv->ucode_type == IWL_UCODE_INIT) { 598 if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
599 if (!base) 599 if (!base)
600 base = priv->init_errlog_ptr; 600 base = priv->init_errlog_ptr;
601 } else { 601 } else {
@@ -607,7 +607,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
607 IWL_ERR(trans, 607 IWL_ERR(trans,
608 "Not valid error log pointer 0x%08X for %s uCode\n", 608 "Not valid error log pointer 0x%08X for %s uCode\n",
609 base, 609 base,
610 (priv->ucode_type == IWL_UCODE_INIT) 610 (trans->shrd->ucode_type == IWL_UCODE_INIT)
611 ? "Init" : "RT"); 611 ? "Init" : "RT");
612 return; 612 return;
613 } 613 }
@@ -648,6 +648,21 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
648 IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver); 648 IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
649 IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver); 649 IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
650 IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd); 650 IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
651
652 IWL_ERR(trans, "0x%08X | isr0\n", table.isr0);
653 IWL_ERR(trans, "0x%08X | isr1\n", table.isr1);
654 IWL_ERR(trans, "0x%08X | isr2\n", table.isr2);
655 IWL_ERR(trans, "0x%08X | isr3\n", table.isr3);
656 IWL_ERR(trans, "0x%08X | isr4\n", table.isr4);
657 IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref);
658 IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event);
659 IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control);
660 IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration);
661 IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
662 IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
663 IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
664 IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp);
665 IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler);
651} 666}
652 667
653/** 668/**
@@ -709,8 +724,8 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
709 if (num_events == 0) 724 if (num_events == 0)
710 return pos; 725 return pos;
711 726
712 base = priv->device_pointers.log_event_table; 727 base = trans->shrd->device_pointers.log_event_table;
713 if (priv->ucode_type == IWL_UCODE_INIT) { 728 if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
714 if (!base) 729 if (!base)
715 base = priv->init_evtlog_ptr; 730 base = priv->init_evtlog_ptr;
716 } else { 731 } else {
@@ -823,8 +838,8 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
823 size_t bufsz = 0; 838 size_t bufsz = 0;
824 struct iwl_priv *priv = priv(trans); 839 struct iwl_priv *priv = priv(trans);
825 840
826 base = priv->device_pointers.log_event_table; 841 base = trans->shrd->device_pointers.log_event_table;
827 if (priv->ucode_type == IWL_UCODE_INIT) { 842 if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
828 logsize = priv->init_evtlog_size; 843 logsize = priv->init_evtlog_size;
829 if (!base) 844 if (!base)
830 base = priv->init_evtlog_ptr; 845 base = priv->init_evtlog_ptr;
@@ -838,7 +853,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
838 IWL_ERR(trans, 853 IWL_ERR(trans,
839 "Invalid event log pointer 0x%08X for %s uCode\n", 854 "Invalid event log pointer 0x%08X for %s uCode\n",
840 base, 855 base,
841 (priv->ucode_type == IWL_UCODE_INIT) 856 (trans->shrd->ucode_type == IWL_UCODE_INIT)
842 ? "Init" : "RT"); 857 ? "Init" : "RT");
843 return -EINVAL; 858 return -EINVAL;
844 } 859 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 6dba1515023c..79331fb10aa5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -559,7 +559,6 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
559 tid_data->agg.txq_id = txq_id; 559 tid_data->agg.txq_id = txq_id;
560 iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id); 560 iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
561 561
562 tid_data = &trans->shrd->tid_data[sta_id][tid];
563 if (tid_data->tfds_in_queue == 0) { 562 if (tid_data->tfds_in_queue == 0) {
564 IWL_DEBUG_TX_QUEUES(trans, "HW queue is empty\n"); 563 IWL_DEBUG_TX_QUEUES(trans, "HW queue is empty\n");
565 tid_data->agg.state = IWL_AGG_ON; 564 tid_data->agg.state = IWL_AGG_ON;
@@ -1121,9 +1120,6 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
1121 return 0; 1120 return 0;
1122 } 1121 }
1123 1122
1124 IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
1125 q->read_ptr, index);
1126
1127 if (WARN_ON(!skb_queue_empty(skbs))) 1123 if (WARN_ON(!skb_queue_empty(skbs)))
1128 return 0; 1124 return 0;
1129 1125
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index a1a58330273f..66e1b9fa0b8b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -990,29 +990,16 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
990 return 0; 990 return 0;
991} 991}
992 992
993static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans) 993static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
994{ 994{
995 unsigned long flags; 995 unsigned long flags;
996 struct iwl_trans_pcie *trans_pcie = 996 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
997 IWL_TRANS_GET_PCIE_TRANS(trans);
998 997
998 /* tell the device to stop sending interrupts */
999 spin_lock_irqsave(&trans->shrd->lock, flags); 999 spin_lock_irqsave(&trans->shrd->lock, flags);
1000 iwl_disable_interrupts(trans); 1000 iwl_disable_interrupts(trans);
1001 spin_unlock_irqrestore(&trans->shrd->lock, flags); 1001 spin_unlock_irqrestore(&trans->shrd->lock, flags);
1002 1002
1003 /* wait to make sure we flush pending tasklet*/
1004 synchronize_irq(bus(trans)->irq);
1005 tasklet_kill(&trans_pcie->irq_tasklet);
1006}
1007
1008static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1009{
1010 /* stop and reset the on-board processor */
1011 iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1012
1013 /* tell the device to stop sending interrupts */
1014 iwl_trans_pcie_disable_sync_irq(trans);
1015
1016 /* device going down, Stop using ICT table */ 1003 /* device going down, Stop using ICT table */
1017 iwl_disable_ict(trans); 1004 iwl_disable_ict(trans);
1018 1005
@@ -1039,6 +1026,20 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1039 1026
1040 /* Stop the device, and put it in low power state */ 1027 /* Stop the device, and put it in low power state */
1041 iwl_apm_stop(priv(trans)); 1028 iwl_apm_stop(priv(trans));
1029
1030 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1031 * Clean again the interrupt here
1032 */
1033 spin_lock_irqsave(&trans->shrd->lock, flags);
1034 iwl_disable_interrupts(trans);
1035 spin_unlock_irqrestore(&trans->shrd->lock, flags);
1036
1037 /* wait to make sure we flush pending tasklet*/
1038 synchronize_irq(bus(trans)->irq);
1039 tasklet_kill(&trans_pcie->irq_tasklet);
1040
1041 /* stop and reset the on-board processor */
1042 iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1042} 1043}
1043 1044
1044static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 1045static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -1099,13 +1100,21 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1099 hdr->seq_ctrl = hdr->seq_ctrl & 1100 hdr->seq_ctrl = hdr->seq_ctrl &
1100 cpu_to_le16(IEEE80211_SCTL_FRAG); 1101 cpu_to_le16(IEEE80211_SCTL_FRAG);
1101 hdr->seq_ctrl |= cpu_to_le16(seq_number); 1102 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1102 seq_number += 0x10;
1103 /* aggregation is on for this <sta,tid> */ 1103 /* aggregation is on for this <sta,tid> */
1104 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 1104 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1105 WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON); 1105 if (WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON)) {
1106 IWL_ERR(trans, "TX_CTL_AMPDU while not in AGG:"
1107 " Tx flags = 0x%08x, agg.state = %d",
1108 info->flags, tid_data->agg.state);
1109 IWL_ERR(trans, "sta_id = %d, tid = %d "
1110 "txq_id = %d, seq_num = %d", sta_id,
1111 tid, tid_data->agg.txq_id,
1112 seq_number >> 4);
1113 }
1106 txq_id = tid_data->agg.txq_id; 1114 txq_id = tid_data->agg.txq_id;
1107 is_agg = true; 1115 is_agg = true;
1108 } 1116 }
1117 seq_number += 0x10;
1109 } 1118 }
1110 1119
1111 /* Copy MAC header from skb into command buffer */ 1120 /* Copy MAC header from skb into command buffer */
@@ -1350,9 +1359,9 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
1350 } 1359 }
1351 1360
1352 if (txq->q.read_ptr != tfd_num) { 1361 if (txq->q.read_ptr != tfd_num) {
1353 IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim " 1362 IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
1354 "scd_ssn=%d idx=%d txq=%d swq=%d\n", 1363 txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
1355 ssn , tfd_num, txq_id, txq->swq_id); 1364 tfd_num, ssn);
1356 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); 1365 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
1357 if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond) 1366 if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
1358 iwl_wake_queue(trans, txq, "Packets reclaimed"); 1367 iwl_wake_queue(trans, txq, "Packets reclaimed");
@@ -1364,6 +1373,7 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
1364 1373
1365static void iwl_trans_pcie_free(struct iwl_trans *trans) 1374static void iwl_trans_pcie_free(struct iwl_trans *trans)
1366{ 1375{
1376 iwl_calib_free_results(trans);
1367 iwl_trans_pcie_tx_free(trans); 1377 iwl_trans_pcie_tx_free(trans);
1368 iwl_trans_pcie_rx_free(trans); 1378 iwl_trans_pcie_rx_free(trans);
1369 free_irq(bus(trans)->irq, trans); 1379 free_irq(bus(trans)->irq, trans);
@@ -1515,8 +1525,12 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
1515 if (time_after(jiffies, timeout)) { 1525 if (time_after(jiffies, timeout)) {
1516 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id, 1526 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
1517 hw_params(trans).wd_timeout); 1527 hw_params(trans).wd_timeout);
1518 IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n", 1528 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
1519 q->read_ptr, q->write_ptr); 1529 q->read_ptr, q->write_ptr);
1530 IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
1531 iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt))
1532 & (TFD_QUEUE_SIZE_MAX - 1),
1533 iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt)));
1520 return 1; 1534 return 1;
1521 } 1535 }
1522 1536
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 50227ebc0ee2..f94a6ee5f82f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -220,11 +220,12 @@ struct fw_img {
220 struct fw_desc data; /* firmware data image */ 220 struct fw_desc data; /* firmware data image */
221}; 221};
222 222
223enum iwl_ucode_type { 223/* Opaque calibration results */
224 IWL_UCODE_NONE, 224struct iwl_calib_result {
225 IWL_UCODE_REGULAR, 225 struct list_head list;
226 IWL_UCODE_INIT, 226 size_t cmd_len;
227 IWL_UCODE_WOWLAN, 227 struct iwl_calib_hdr hdr;
228 /* data follows */
228}; 229};
229 230
230/** 231/**
@@ -236,6 +237,8 @@ enum iwl_ucode_type {
236 * @ucode_rt: run time ucode image 237 * @ucode_rt: run time ucode image
237 * @ucode_init: init ucode image 238 * @ucode_init: init ucode image
238 * @ucode_wowlan: wake on wireless ucode image (optional) 239 * @ucode_wowlan: wake on wireless ucode image (optional)
240 * @nvm_device_type: indicates OTP or eeprom
241 * @calib_results: list head for init calibration results
239 */ 242 */
240struct iwl_trans { 243struct iwl_trans {
241 const struct iwl_trans_ops *ops; 244 const struct iwl_trans_ops *ops;
@@ -250,6 +253,9 @@ struct iwl_trans {
250 /* eeprom related variables */ 253 /* eeprom related variables */
251 int nvm_device_type; 254 int nvm_device_type;
252 255
256 /* init calibration results */
257 struct list_head calib_results;
258
253 /* pointer to trans specific struct */ 259 /* pointer to trans specific struct */
254 /*Ensure that this pointer will always be aligned to sizeof pointer */ 260 /*Ensure that this pointer will always be aligned to sizeof pointer */
255 char trans_specific[0] __attribute__((__aligned__(sizeof(void *)))); 261 char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
@@ -386,4 +392,9 @@ int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
386 const void *data, size_t len); 392 const void *data, size_t len);
387void iwl_dealloc_ucode(struct iwl_trans *trans); 393void iwl_dealloc_ucode(struct iwl_trans *trans);
388 394
395int iwl_send_calib_results(struct iwl_trans *trans);
396int iwl_calib_set(struct iwl_trans *trans,
397 const struct iwl_calib_hdr *cmd, int len);
398void iwl_calib_free_results(struct iwl_trans *trans);
399
389#endif /* __iwl_trans_h__ */ 400#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
index 9ec315b31d45..0577212ad3f3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c
@@ -122,7 +122,7 @@ int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
122/* 122/*
123 * ucode 123 * ucode
124 */ 124 */
125static int iwlagn_load_section(struct iwl_trans *trans, const char *name, 125static int iwl_load_section(struct iwl_trans *trans, const char *name,
126 struct fw_desc *image, u32 dst_addr) 126 struct fw_desc *image, u32 dst_addr)
127{ 127{
128 struct iwl_bus *bus = bus(trans); 128 struct iwl_bus *bus = bus(trans);
@@ -188,7 +188,7 @@ static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
188 return NULL; 188 return NULL;
189} 189}
190 190
191static int iwlagn_load_given_ucode(struct iwl_trans *trans, 191static int iwl_load_given_ucode(struct iwl_trans *trans,
192 enum iwl_ucode_type ucode_type) 192 enum iwl_ucode_type ucode_type)
193{ 193{
194 int ret = 0; 194 int ret = 0;
@@ -201,36 +201,36 @@ static int iwlagn_load_given_ucode(struct iwl_trans *trans,
201 return -EINVAL; 201 return -EINVAL;
202 } 202 }
203 203
204 ret = iwlagn_load_section(trans, "INST", &image->code, 204 ret = iwl_load_section(trans, "INST", &image->code,
205 IWLAGN_RTC_INST_LOWER_BOUND); 205 IWLAGN_RTC_INST_LOWER_BOUND);
206 if (ret) 206 if (ret)
207 return ret; 207 return ret;
208 208
209 return iwlagn_load_section(trans, "DATA", &image->data, 209 return iwl_load_section(trans, "DATA", &image->data,
210 IWLAGN_RTC_DATA_LOWER_BOUND); 210 IWLAGN_RTC_DATA_LOWER_BOUND);
211} 211}
212 212
213/* 213/*
214 * Calibration 214 * Calibration
215 */ 215 */
216static int iwlagn_set_Xtal_calib(struct iwl_priv *priv) 216static int iwl_set_Xtal_calib(struct iwl_priv *priv)
217{ 217{
218 struct iwl_calib_xtal_freq_cmd cmd; 218 struct iwl_calib_xtal_freq_cmd cmd;
219 __le16 *xtal_calib = 219 __le16 *xtal_calib =
220 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL); 220 (__le16 *)iwl_eeprom_query_addr(priv->shrd, EEPROM_XTAL);
221 221
222 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); 222 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
223 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); 223 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
224 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); 224 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
225 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], 225 return iwl_calib_set(trans(priv), (void *)&cmd, sizeof(cmd));
226 (u8 *)&cmd, sizeof(cmd));
227} 226}
228 227
229static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv) 228static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
230{ 229{
231 struct iwl_calib_temperature_offset_cmd cmd; 230 struct iwl_calib_temperature_offset_cmd cmd;
232 __le16 *offset_calib = 231 __le16 *offset_calib =
233 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE); 232 (__le16 *)iwl_eeprom_query_addr(priv->shrd,
233 EEPROM_RAW_TEMPERATURE);
234 234
235 memset(&cmd, 0, sizeof(cmd)); 235 memset(&cmd, 0, sizeof(cmd));
236 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 236 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
@@ -240,22 +240,22 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
240 240
241 IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n", 241 IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
242 le16_to_cpu(cmd.radio_sensor_offset)); 242 le16_to_cpu(cmd.radio_sensor_offset));
243 return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET], 243 return iwl_calib_set(trans(priv), (void *)&cmd, sizeof(cmd));
244 (u8 *)&cmd, sizeof(cmd));
245} 244}
246 245
247static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv) 246static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
248{ 247{
249 struct iwl_calib_temperature_offset_v2_cmd cmd; 248 struct iwl_calib_temperature_offset_v2_cmd cmd;
250 __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv, 249 __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv->shrd,
251 EEPROM_KELVIN_TEMPERATURE); 250 EEPROM_KELVIN_TEMPERATURE);
252 __le16 *offset_calib_low = 251 __le16 *offset_calib_low =
253 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE); 252 (__le16 *)iwl_eeprom_query_addr(priv->shrd,
253 EEPROM_RAW_TEMPERATURE);
254 struct iwl_eeprom_calib_hdr *hdr; 254 struct iwl_eeprom_calib_hdr *hdr;
255 255
256 memset(&cmd, 0, sizeof(cmd)); 256 memset(&cmd, 0, sizeof(cmd));
257 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 257 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
258 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, 258 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv->shrd,
259 EEPROM_CALIB_ALL); 259 EEPROM_CALIB_ALL);
260 memcpy(&cmd.radio_sensor_offset_high, offset_calib_high, 260 memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
261 sizeof(*offset_calib_high)); 261 sizeof(*offset_calib_high));
@@ -276,11 +276,10 @@ static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
276 IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n", 276 IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
277 le16_to_cpu(cmd.burntVoltageRef)); 277 le16_to_cpu(cmd.burntVoltageRef));
278 278
279 return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET], 279 return iwl_calib_set(trans(priv), (void *)&cmd, sizeof(cmd));
280 (u8 *)&cmd, sizeof(cmd));
281} 280}
282 281
283static int iwlagn_send_calib_cfg(struct iwl_priv *priv) 282static int iwl_send_calib_cfg(struct iwl_trans *trans)
284{ 283{
285 struct iwl_calib_cfg_cmd calib_cfg_cmd; 284 struct iwl_calib_cfg_cmd calib_cfg_cmd;
286 struct iwl_host_cmd cmd = { 285 struct iwl_host_cmd cmd = {
@@ -296,7 +295,7 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
296 calib_cfg_cmd.ucd_calib_cfg.flags = 295 calib_cfg_cmd.ucd_calib_cfg.flags =
297 IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK; 296 IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
298 297
299 return iwl_trans_send_cmd(trans(priv), &cmd); 298 return iwl_trans_send_cmd(trans, &cmd);
300} 299}
301 300
302int iwlagn_rx_calib_result(struct iwl_priv *priv, 301int iwlagn_rx_calib_result(struct iwl_priv *priv,
@@ -306,37 +305,14 @@ int iwlagn_rx_calib_result(struct iwl_priv *priv,
306 struct iwl_rx_packet *pkt = rxb_addr(rxb); 305 struct iwl_rx_packet *pkt = rxb_addr(rxb);
307 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw; 306 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
308 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 307 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
309 int index;
310 308
311 /* reduce the size of the length field itself */ 309 /* reduce the size of the length field itself */
312 len -= 4; 310 len -= 4;
313 311
314 /* Define the order in which the results will be sent to the runtime 312 if (iwl_calib_set(trans(priv), hdr, len))
315 * uCode. iwl_send_calib_results sends them in a row according to 313 IWL_ERR(priv, "Failed to record calibration data %d\n",
316 * their index. We sort them here 314 hdr->op_code);
317 */ 315
318 switch (hdr->op_code) {
319 case IWL_PHY_CALIBRATE_DC_CMD:
320 index = IWL_CALIB_DC;
321 break;
322 case IWL_PHY_CALIBRATE_LO_CMD:
323 index = IWL_CALIB_LO;
324 break;
325 case IWL_PHY_CALIBRATE_TX_IQ_CMD:
326 index = IWL_CALIB_TX_IQ;
327 break;
328 case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
329 index = IWL_CALIB_TX_IQ_PERD;
330 break;
331 case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
332 index = IWL_CALIB_BASE_BAND;
333 break;
334 default:
335 IWL_ERR(priv, "Unknown calibration notification %d\n",
336 hdr->op_code);
337 return -1;
338 }
339 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
340 return 0; 316 return 0;
341} 317}
342 318
@@ -352,14 +328,14 @@ int iwlagn_init_alive_start(struct iwl_priv *priv)
352 * no need to close the envlope since we are going 328 * no need to close the envlope since we are going
353 * to load the runtime uCode later. 329 * to load the runtime uCode later.
354 */ 330 */
355 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN, 331 ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
356 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2); 332 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
357 if (ret) 333 if (ret)
358 return ret; 334 return ret;
359 335
360 } 336 }
361 337
362 ret = iwlagn_send_calib_cfg(priv); 338 ret = iwl_send_calib_cfg(trans(priv));
363 if (ret) 339 if (ret)
364 return ret; 340 return ret;
365 341
@@ -369,15 +345,15 @@ int iwlagn_init_alive_start(struct iwl_priv *priv)
369 */ 345 */
370 if (priv->cfg->need_temp_offset_calib) { 346 if (priv->cfg->need_temp_offset_calib) {
371 if (priv->cfg->temp_offset_v2) 347 if (priv->cfg->temp_offset_v2)
372 return iwlagn_set_temperature_offset_calib_v2(priv); 348 return iwl_set_temperature_offset_calib_v2(priv);
373 else 349 else
374 return iwlagn_set_temperature_offset_calib(priv); 350 return iwl_set_temperature_offset_calib(priv);
375 } 351 }
376 352
377 return 0; 353 return 0;
378} 354}
379 355
380static int iwlagn_send_wimax_coex(struct iwl_priv *priv) 356static int iwl_send_wimax_coex(struct iwl_priv *priv)
381{ 357{
382 struct iwl_wimax_coex_cmd coex_cmd; 358 struct iwl_wimax_coex_cmd coex_cmd;
383 359
@@ -405,7 +381,7 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
405 sizeof(coex_cmd), &coex_cmd); 381 sizeof(coex_cmd), &coex_cmd);
406} 382}
407 383
408static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = { 384static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
409 ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | 385 ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
410 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)), 386 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
411 ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) | 387 ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
@@ -427,42 +403,42 @@ static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
427 0, 0, 0, 0, 0, 0, 0 403 0, 0, 0, 0, 0, 0, 0
428}; 404};
429 405
430void iwlagn_send_prio_tbl(struct iwl_priv *priv) 406void iwl_send_prio_tbl(struct iwl_trans *trans)
431{ 407{
432 struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd; 408 struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
433 409
434 memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl, 410 memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
435 sizeof(iwlagn_bt_prio_tbl)); 411 sizeof(iwl_bt_prio_tbl));
436 if (iwl_trans_send_cmd_pdu(trans(priv), 412 if (iwl_trans_send_cmd_pdu(trans,
437 REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC, 413 REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
438 sizeof(prio_tbl_cmd), &prio_tbl_cmd)) 414 sizeof(prio_tbl_cmd), &prio_tbl_cmd))
439 IWL_ERR(priv, "failed to send BT prio tbl command\n"); 415 IWL_ERR(trans, "failed to send BT prio tbl command\n");
440} 416}
441 417
442int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type) 418int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type)
443{ 419{
444 struct iwl_bt_coex_prot_env_cmd env_cmd; 420 struct iwl_bt_coex_prot_env_cmd env_cmd;
445 int ret; 421 int ret;
446 422
447 env_cmd.action = action; 423 env_cmd.action = action;
448 env_cmd.type = type; 424 env_cmd.type = type;
449 ret = iwl_trans_send_cmd_pdu(trans(priv), 425 ret = iwl_trans_send_cmd_pdu(trans,
450 REPLY_BT_COEX_PROT_ENV, CMD_SYNC, 426 REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
451 sizeof(env_cmd), &env_cmd); 427 sizeof(env_cmd), &env_cmd);
452 if (ret) 428 if (ret)
453 IWL_ERR(priv, "failed to send BT env command\n"); 429 IWL_ERR(trans, "failed to send BT env command\n");
454 return ret; 430 return ret;
455} 431}
456 432
457 433
458static int iwlagn_alive_notify(struct iwl_priv *priv) 434static int iwl_alive_notify(struct iwl_priv *priv)
459{ 435{
460 struct iwl_rxon_context *ctx; 436 struct iwl_rxon_context *ctx;
461 int ret; 437 int ret;
462 438
463 if (!priv->tx_cmd_pool) 439 if (!priv->tx_cmd_pool)
464 priv->tx_cmd_pool = 440 priv->tx_cmd_pool =
465 kmem_cache_create("iwlagn_dev_cmd", 441 kmem_cache_create("iwl_dev_cmd",
466 sizeof(struct iwl_device_cmd), 442 sizeof(struct iwl_device_cmd),
467 sizeof(void *), 0, NULL); 443 sizeof(void *), 0, NULL);
468 444
@@ -473,15 +449,17 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
473 for_each_context(priv, ctx) 449 for_each_context(priv, ctx)
474 ctx->last_tx_rejected = false; 450 ctx->last_tx_rejected = false;
475 451
476 ret = iwlagn_send_wimax_coex(priv); 452 ret = iwl_send_wimax_coex(priv);
477 if (ret) 453 if (ret)
478 return ret; 454 return ret;
479 455
480 ret = iwlagn_set_Xtal_calib(priv); 456 if (!priv->cfg->no_xtal_calib) {
481 if (ret) 457 ret = iwl_set_Xtal_calib(priv);
482 return ret; 458 if (ret)
459 return ret;
460 }
483 461
484 return iwl_send_calib_results(priv); 462 return iwl_send_calib_results(trans(priv));
485} 463}
486 464
487 465
@@ -572,7 +550,7 @@ struct iwlagn_alive_data {
572 u8 subtype; 550 u8 subtype;
573}; 551};
574 552
575static void iwlagn_alive_fn(struct iwl_priv *priv, 553static void iwl_alive_fn(struct iwl_trans *trans,
576 struct iwl_rx_packet *pkt, 554 struct iwl_rx_packet *pkt,
577 void *data) 555 void *data)
578{ 556{
@@ -581,20 +559,84 @@ static void iwlagn_alive_fn(struct iwl_priv *priv,
581 559
582 palive = &pkt->u.alive_frame; 560 palive = &pkt->u.alive_frame;
583 561
584 IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision " 562 IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision "
585 "0x%01X 0x%01X\n", 563 "0x%01X 0x%01X\n",
586 palive->is_valid, palive->ver_type, 564 palive->is_valid, palive->ver_type,
587 palive->ver_subtype); 565 palive->ver_subtype);
588 566
589 priv->device_pointers.error_event_table = 567 trans->shrd->device_pointers.error_event_table =
590 le32_to_cpu(palive->error_event_table_ptr); 568 le32_to_cpu(palive->error_event_table_ptr);
591 priv->device_pointers.log_event_table = 569 trans->shrd->device_pointers.log_event_table =
592 le32_to_cpu(palive->log_event_table_ptr); 570 le32_to_cpu(palive->log_event_table_ptr);
593 571
594 alive_data->subtype = palive->ver_subtype; 572 alive_data->subtype = palive->ver_subtype;
595 alive_data->valid = palive->is_valid == UCODE_VALID_OK; 573 alive_data->valid = palive->is_valid == UCODE_VALID_OK;
596} 574}
597 575
576/* notification wait support */
577void iwl_init_notification_wait(struct iwl_shared *shrd,
578 struct iwl_notification_wait *wait_entry,
579 u8 cmd,
580 void (*fn)(struct iwl_trans *trans,
581 struct iwl_rx_packet *pkt,
582 void *data),
583 void *fn_data)
584{
585 wait_entry->fn = fn;
586 wait_entry->fn_data = fn_data;
587 wait_entry->cmd = cmd;
588 wait_entry->triggered = false;
589 wait_entry->aborted = false;
590
591 spin_lock_bh(&shrd->notif_wait_lock);
592 list_add(&wait_entry->list, &shrd->notif_waits);
593 spin_unlock_bh(&shrd->notif_wait_lock);
594}
595
596int iwl_wait_notification(struct iwl_shared *shrd,
597 struct iwl_notification_wait *wait_entry,
598 unsigned long timeout)
599{
600 int ret;
601
602 ret = wait_event_timeout(shrd->notif_waitq,
603 wait_entry->triggered || wait_entry->aborted,
604 timeout);
605
606 spin_lock_bh(&shrd->notif_wait_lock);
607 list_del(&wait_entry->list);
608 spin_unlock_bh(&shrd->notif_wait_lock);
609
610 if (wait_entry->aborted)
611 return -EIO;
612
613 /* return value is always >= 0 */
614 if (ret <= 0)
615 return -ETIMEDOUT;
616 return 0;
617}
618
619void iwl_remove_notification(struct iwl_shared *shrd,
620 struct iwl_notification_wait *wait_entry)
621{
622 spin_lock_bh(&shrd->notif_wait_lock);
623 list_del(&wait_entry->list);
624 spin_unlock_bh(&shrd->notif_wait_lock);
625}
626
627void iwl_abort_notification_waits(struct iwl_shared *shrd)
628{
629 unsigned long flags;
630 struct iwl_notification_wait *wait_entry;
631
632 spin_lock_irqsave(&shrd->notif_wait_lock, flags);
633 list_for_each_entry(wait_entry, &shrd->notif_waits, list)
634 wait_entry->aborted = true;
635 spin_unlock_irqrestore(&shrd->notif_wait_lock, flags);
636
637 wake_up_all(&shrd->notif_waitq);
638}
639
598#define UCODE_ALIVE_TIMEOUT HZ 640#define UCODE_ALIVE_TIMEOUT HZ
599#define UCODE_CALIB_TIMEOUT (2*HZ) 641#define UCODE_CALIB_TIMEOUT (2*HZ)
600 642
@@ -603,41 +645,43 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
603{ 645{
604 struct iwl_notification_wait alive_wait; 646 struct iwl_notification_wait alive_wait;
605 struct iwlagn_alive_data alive_data; 647 struct iwlagn_alive_data alive_data;
648 struct iwl_trans *trans = trans(priv);
606 int ret; 649 int ret;
607 enum iwl_ucode_type old_type; 650 enum iwl_ucode_type old_type;
608 651
609 ret = iwl_trans_start_device(trans(priv)); 652 ret = iwl_trans_start_device(trans);
610 if (ret) 653 if (ret)
611 return ret; 654 return ret;
612 655
613 iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE, 656 iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
614 iwlagn_alive_fn, &alive_data); 657 iwl_alive_fn, &alive_data);
615 658
616 old_type = priv->ucode_type; 659 old_type = trans->shrd->ucode_type;
617 priv->ucode_type = ucode_type; 660 trans->shrd->ucode_type = ucode_type;
618 661
619 ret = iwlagn_load_given_ucode(trans(priv), ucode_type); 662 ret = iwl_load_given_ucode(trans, ucode_type);
620 if (ret) { 663 if (ret) {
621 priv->ucode_type = old_type; 664 trans->shrd->ucode_type = old_type;
622 iwlagn_remove_notification(priv, &alive_wait); 665 iwl_remove_notification(trans->shrd, &alive_wait);
623 return ret; 666 return ret;
624 } 667 }
625 668
626 iwl_trans_kick_nic(trans(priv)); 669 iwl_trans_kick_nic(trans);
627 670
628 /* 671 /*
629 * Some things may run in the background now, but we 672 * Some things may run in the background now, but we
630 * just wait for the ALIVE notification here. 673 * just wait for the ALIVE notification here.
631 */ 674 */
632 ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT); 675 ret = iwl_wait_notification(trans->shrd, &alive_wait,
676 UCODE_ALIVE_TIMEOUT);
633 if (ret) { 677 if (ret) {
634 priv->ucode_type = old_type; 678 trans->shrd->ucode_type = old_type;
635 return ret; 679 return ret;
636 } 680 }
637 681
638 if (!alive_data.valid) { 682 if (!alive_data.valid) {
639 IWL_ERR(priv, "Loaded ucode is not valid!\n"); 683 IWL_ERR(priv, "Loaded ucode is not valid!\n");
640 priv->ucode_type = old_type; 684 trans->shrd->ucode_type = old_type;
641 return -EIO; 685 return -EIO;
642 } 686 }
643 687
@@ -647,9 +691,9 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
647 * skip it for WoWLAN. 691 * skip it for WoWLAN.
648 */ 692 */
649 if (ucode_type != IWL_UCODE_WOWLAN) { 693 if (ucode_type != IWL_UCODE_WOWLAN) {
650 ret = iwl_verify_ucode(trans(priv), ucode_type); 694 ret = iwl_verify_ucode(trans, ucode_type);
651 if (ret) { 695 if (ret) {
652 priv->ucode_type = old_type; 696 trans->shrd->ucode_type = old_type;
653 return ret; 697 return ret;
654 } 698 }
655 699
@@ -657,11 +701,11 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
657 msleep(5); 701 msleep(5);
658 } 702 }
659 703
660 ret = iwlagn_alive_notify(priv); 704 ret = iwl_alive_notify(priv);
661 if (ret) { 705 if (ret) {
662 IWL_WARN(priv, 706 IWL_WARN(priv,
663 "Could not complete ALIVE transition: %d\n", ret); 707 "Could not complete ALIVE transition: %d\n", ret);
664 priv->ucode_type = old_type; 708 trans->shrd->ucode_type = old_type;
665 return ret; 709 return ret;
666 } 710 }
667 711
@@ -679,10 +723,10 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
679 if (!trans(priv)->ucode_init.code.len) 723 if (!trans(priv)->ucode_init.code.len)
680 return 0; 724 return 0;
681 725
682 if (priv->ucode_type != IWL_UCODE_NONE) 726 if (priv->shrd->ucode_type != IWL_UCODE_NONE)
683 return 0; 727 return 0;
684 728
685 iwlagn_init_notification_wait(priv, &calib_wait, 729 iwl_init_notification_wait(priv->shrd, &calib_wait,
686 CALIBRATION_COMPLETE_NOTIFICATION, 730 CALIBRATION_COMPLETE_NOTIFICATION,
687 NULL, NULL); 731 NULL, NULL);
688 732
@@ -699,12 +743,13 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
699 * Some things may run in the background now, but we 743 * Some things may run in the background now, but we
700 * just wait for the calibration complete notification. 744 * just wait for the calibration complete notification.
701 */ 745 */
702 ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT); 746 ret = iwl_wait_notification(priv->shrd, &calib_wait,
747 UCODE_CALIB_TIMEOUT);
703 748
704 goto out; 749 goto out;
705 750
706 error: 751 error:
707 iwlagn_remove_notification(priv, &calib_wait); 752 iwl_remove_notification(priv->shrd, &calib_wait);
708 out: 753 out:
709 /* Whatever happened, stop the device */ 754 /* Whatever happened, stop the device */
710 iwl_trans_stop_device(trans(priv)); 755 iwl_trans_stop_device(trans(priv));
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 50dee6a0a5ca..bd75078c454b 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -42,6 +42,7 @@
42#include <linux/ieee80211.h> 42#include <linux/ieee80211.h>
43#include <linux/sched.h> 43#include <linux/sched.h>
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/moduleparam.h>
45 46
46#include "iwm.h" 47#include "iwm.h"
47#include "bus.h" 48#include "bus.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index 0a0cc9667cd6..87eef5773a02 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -25,6 +25,7 @@
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/bitops.h> 26#include <linux/bitops.h>
27#include <linux/debugfs.h> 27#include <linux/debugfs.h>
28#include <linux/export.h>
28 29
29#include "iwm.h" 30#include "iwm.h"
30#include "bus.h" 31#include "bus.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 362002735b12..98a179f98ea1 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -42,6 +42,7 @@
42#include <linux/ieee80211.h> 42#include <linux/ieee80211.h>
43#include <linux/wireless.h> 43#include <linux/wireless.h>
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/moduleparam.h>
45 46
46#include "iwm.h" 47#include "iwm.h"
47#include "debug.h" 48#include "debug.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 56383e7be835..764b40dd24ad 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -63,6 +63,7 @@
63 */ 63 */
64 64
65#include <linux/kernel.h> 65#include <linux/kernel.h>
66#include <linux/module.h>
66#include <linux/slab.h> 67#include <linux/slab.h>
67#include <linux/netdevice.h> 68#include <linux/netdevice.h>
68#include <linux/debugfs.h> 69#include <linux/debugfs.h>
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 89f34ad8d34a..d1d84e0e30fc 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -635,7 +635,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
635 if (channel && 635 if (channel &&
636 !(channel->flags & IEEE80211_CHAN_DISABLED)) { 636 !(channel->flags & IEEE80211_CHAN_DISABLED)) {
637 bss = cfg80211_inform_bss(wiphy, channel, 637 bss = cfg80211_inform_bss(wiphy, channel,
638 bssid, le64_to_cpu(*(__le64 *)tsfdesc), 638 bssid, get_unaligned_le64(tsfdesc),
639 capa, intvl, ie, ielen, 639 capa, intvl, ie, ielen,
640 LBS_SCAN_RSSI_TO_MBM(rssi), 640 LBS_SCAN_RSSI_TO_MBM(rssi),
641 GFP_KERNEL); 641 GFP_KERNEL);
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index e08ab1de3d9d..d798bcc0d83a 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -8,6 +8,7 @@
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/if_arp.h> 10#include <linux/if_arp.h>
11#include <linux/export.h>
11 12
12#include "decl.h" 13#include "decl.h"
13#include "cfg.h" 14#include "cfg.h"
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 1af182778844..d8d8f0d0899f 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -5,6 +5,7 @@
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/string.h> 6#include <linux/string.h>
7#include <linux/slab.h> 7#include <linux/slab.h>
8#include <linux/export.h>
8 9
9#include "decl.h" 10#include "decl.h"
10#include "cmd.h" 11#include "cmd.h"
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index c962e21762dc..9804ebc892d4 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -29,7 +29,7 @@
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 30
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/moduleparam.h> 32#include <linux/module.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/firmware.h> 34#include <linux/firmware.h>
35#include <linux/netdevice.h> 35#include <linux/netdevice.h>
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 622ae6de0d8b..50b1ee7721e9 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -21,7 +21,7 @@
21 21
22#include <linux/hardirq.h> 22#include <linux/hardirq.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/moduleparam.h> 24#include <linux/module.h>
25#include <linux/firmware.h> 25#include <linux/firmware.h>
26#include <linux/jiffies.h> 26#include <linux/jiffies.h>
27#include <linux/list.h> 27#include <linux/list.h>
@@ -995,6 +995,7 @@ static int if_spi_host_to_card(struct lbs_private *priv,
995 spin_unlock_irqrestore(&card->buffer_lock, flags); 995 spin_unlock_irqrestore(&card->buffer_lock, flags);
996 break; 996 break;
997 default: 997 default:
998 kfree(packet);
998 netdev_err(priv->dev, "can't transfer buffer of type %d\n", 999 netdev_err(priv->dev, "can't transfer buffer of type %d\n",
999 type); 1000 type);
1000 err = -EINVAL; 1001 err = -EINVAL;
@@ -1290,7 +1291,6 @@ static struct spi_driver libertas_spi_driver = {
1290 .remove = __devexit_p(libertas_spi_remove), 1291 .remove = __devexit_p(libertas_spi_remove),
1291 .driver = { 1292 .driver = {
1292 .name = "libertas_spi", 1293 .name = "libertas_spi",
1293 .bus = &spi_bus_type,
1294 .owner = THIS_MODULE, 1294 .owner = THIS_MODULE,
1295 .pm = &if_spi_pm_ops, 1295 .pm = &if_spi_pm_ops,
1296 }, 1296 },
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 8147f1e2a0b0..db879c364ebf 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -5,7 +5,7 @@
5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 6
7#include <linux/delay.h> 7#include <linux/delay.h>
8#include <linux/moduleparam.h> 8#include <linux/module.h>
9#include <linux/firmware.h> 9#include <linux/firmware.h>
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 39a6a7a40244..957681dede17 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -6,7 +6,7 @@
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 8
9#include <linux/moduleparam.h> 9#include <linux/module.h>
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/etherdevice.h> 11#include <linux/etherdevice.h>
12#include <linux/hardirq.h> 12#include <linux/hardirq.h>
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 62e10eeadd7e..c7366b07b568 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -8,6 +8,7 @@
8#include <linux/hardirq.h> 8#include <linux/hardirq.h>
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/export.h>
11#include <net/cfg80211.h> 12#include <net/cfg80211.h>
12 13
13#include "defs.h" 14#include "defs.h"
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 8f127520d786..c025f9c18282 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -5,6 +5,7 @@
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/etherdevice.h> 6#include <linux/etherdevice.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/export.h>
8#include <net/cfg80211.h> 9#include <net/cfg80211.h>
9 10
10#include "host.h" 11#include "host.h"
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index 13557fe0bf95..909ac3685010 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/hardirq.h> 12#include <linux/hardirq.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/export.h>
14 15
15#include "libertas_tf.h" 16#include "libertas_tf.h"
16 17
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index ba7d96584cb6..68202e63873a 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -15,7 +15,7 @@
15#include "if_usb.h" 15#include "if_usb.h"
16 16
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/moduleparam.h> 18#include <linux/module.h>
19#include <linux/firmware.h> 19#include <linux/firmware.h>
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index acc461aa385e..ceb51b6e6702 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -13,6 +13,7 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14 14
15#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
16#include <linux/module.h>
16#include "libertas_tf.h" 17#include "libertas_tf.h"
17 18
18#define DRIVER_RELEASE_VERSION "004.p0" 19#define DRIVER_RELEASE_VERSION "004.p0"
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 477100d0b117..52bcdf40d5bd 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -26,6 +26,7 @@
26#include <linux/rtnetlink.h> 26#include <linux/rtnetlink.h>
27#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
28#include <linux/debugfs.h> 28#include <linux/debugfs.h>
29#include <linux/module.h>
29#include <net/genetlink.h> 30#include <net/genetlink.h>
30#include "mac80211_hwsim.h" 31#include "mac80211_hwsim.h"
31 32
@@ -36,7 +37,8 @@ MODULE_AUTHOR("Jouni Malinen");
36MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); 37MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
37MODULE_LICENSE("GPL"); 38MODULE_LICENSE("GPL");
38 39
39int wmediumd_pid; 40static u32 wmediumd_pid;
41
40static int radios = 2; 42static int radios = 2;
41module_param(radios, int, 0444); 43module_param(radios, int, 0444);
42MODULE_PARM_DESC(radios, "Number of simulated radios"); 44MODULE_PARM_DESC(radios, "Number of simulated radios");
@@ -664,7 +666,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
664{ 666{
665 bool ack; 667 bool ack;
666 struct ieee80211_tx_info *txi; 668 struct ieee80211_tx_info *txi;
667 int _pid; 669 u32 _pid;
668 670
669 mac80211_hwsim_monitor_rx(hw, skb); 671 mac80211_hwsim_monitor_rx(hw, skb);
670 672
@@ -675,7 +677,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
675 } 677 }
676 678
677 /* wmediumd mode check */ 679 /* wmediumd mode check */
678 _pid = wmediumd_pid; 680 _pid = ACCESS_ONCE(wmediumd_pid);
679 681
680 if (_pid) 682 if (_pid)
681 return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); 683 return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -763,7 +765,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
763 struct ieee80211_hw *hw = arg; 765 struct ieee80211_hw *hw = arg;
764 struct sk_buff *skb; 766 struct sk_buff *skb;
765 struct ieee80211_tx_info *info; 767 struct ieee80211_tx_info *info;
766 int _pid; 768 u32 _pid;
767 769
768 hwsim_check_magic(vif); 770 hwsim_check_magic(vif);
769 771
@@ -780,7 +782,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
780 mac80211_hwsim_monitor_rx(hw, skb); 782 mac80211_hwsim_monitor_rx(hw, skb);
781 783
782 /* wmediumd mode check */ 784 /* wmediumd mode check */
783 _pid = wmediumd_pid; 785 _pid = ACCESS_ONCE(wmediumd_pid);
784 786
785 if (_pid) 787 if (_pid)
786 return mac80211_hwsim_tx_frame_nl(hw, skb, _pid); 788 return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -1253,7 +1255,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
1253 struct hwsim_vif_priv *vp = (void *)vif->drv_priv; 1255 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
1254 struct sk_buff *skb; 1256 struct sk_buff *skb;
1255 struct ieee80211_pspoll *pspoll; 1257 struct ieee80211_pspoll *pspoll;
1256 int _pid; 1258 u32 _pid;
1257 1259
1258 if (!vp->assoc) 1260 if (!vp->assoc)
1259 return; 1261 return;
@@ -1274,7 +1276,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
1274 memcpy(pspoll->ta, mac, ETH_ALEN); 1276 memcpy(pspoll->ta, mac, ETH_ALEN);
1275 1277
1276 /* wmediumd mode check */ 1278 /* wmediumd mode check */
1277 _pid = wmediumd_pid; 1279 _pid = ACCESS_ONCE(wmediumd_pid);
1278 1280
1279 if (_pid) 1281 if (_pid)
1280 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); 1282 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1291,7 +1293,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
1291 struct hwsim_vif_priv *vp = (void *)vif->drv_priv; 1293 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
1292 struct sk_buff *skb; 1294 struct sk_buff *skb;
1293 struct ieee80211_hdr *hdr; 1295 struct ieee80211_hdr *hdr;
1294 int _pid; 1296 u32 _pid;
1295 1297
1296 if (!vp->assoc) 1298 if (!vp->assoc)
1297 return; 1299 return;
@@ -1313,7 +1315,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
1313 memcpy(hdr->addr3, vp->bssid, ETH_ALEN); 1315 memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
1314 1316
1315 /* wmediumd mode check */ 1317 /* wmediumd mode check */
1316 _pid = wmediumd_pid; 1318 _pid = ACCESS_ONCE(wmediumd_pid);
1317 1319
1318 if (_pid) 1320 if (_pid)
1319 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid); 1321 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1633,8 +1635,6 @@ static int hwsim_init_netlink(void)
1633 int rc; 1635 int rc;
1634 printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); 1636 printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
1635 1637
1636 wmediumd_pid = 0;
1637
1638 rc = genl_register_family_with_ops(&hwsim_genl_family, 1638 rc = genl_register_family_with_ops(&hwsim_genl_family,
1639 hwsim_ops, ARRAY_SIZE(hwsim_ops)); 1639 hwsim_ops, ARRAY_SIZE(hwsim_ops));
1640 if (rc) 1640 if (rc)
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index e9ab9a3fbe9c..787dbe2aa408 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -120,10 +120,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
120static int 120static int
121mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy, 121mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
122 enum nl80211_tx_power_setting type, 122 enum nl80211_tx_power_setting type,
123 int dbm) 123 int mbm)
124{ 124{
125 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); 125 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
126 struct mwifiex_power_cfg power_cfg; 126 struct mwifiex_power_cfg power_cfg;
127 int dbm = MBM_TO_DBM(mbm);
127 128
128 if (type == NL80211_TX_POWER_FIXED) { 129 if (type == NL80211_TX_POWER_FIXED) {
129 power_cfg.is_power_auto = 0; 130 power_cfg.is_power_auto = 0;
@@ -750,17 +751,13 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
750{ 751{
751 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 752 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
752 753
753 if (priv->disconnect)
754 return -EBUSY;
755
756 priv->disconnect = 1;
757 if (mwifiex_deauthenticate(priv, NULL)) 754 if (mwifiex_deauthenticate(priv, NULL))
758 return -EFAULT; 755 return -EFAULT;
759 756
760 wiphy_dbg(wiphy, "info: successfully disconnected from %pM:" 757 wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
761 " reason code %d\n", priv->cfg_bssid, reason_code); 758 " reason code %d\n", priv->cfg_bssid, reason_code);
762 759
763 queue_work(priv->workqueue, &priv->cfg_workqueue); 760 memset(priv->cfg_bssid, 0, ETH_ALEN);
764 761
765 return 0; 762 return 0;
766} 763}
@@ -980,27 +977,32 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
980 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 977 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
981 int ret = 0; 978 int ret = 0;
982 979
983 if (priv->assoc_request)
984 return -EBUSY;
985
986 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { 980 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
987 wiphy_err(wiphy, "received infra assoc request " 981 wiphy_err(wiphy, "received infra assoc request "
988 "when station is in ibss mode\n"); 982 "when station is in ibss mode\n");
989 goto done; 983 goto done;
990 } 984 }
991 985
992 priv->assoc_request = -EINPROGRESS;
993
994 wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n", 986 wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
995 (char *) sme->ssid, sme->bssid); 987 (char *) sme->ssid, sme->bssid);
996 988
997 ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, 989 ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
998 priv->bss_mode, sme->channel, sme, 0); 990 priv->bss_mode, sme->channel, sme, 0);
999
1000 priv->assoc_request = 1;
1001done: 991done:
1002 priv->assoc_result = ret; 992 if (!ret) {
1003 queue_work(priv->workqueue, &priv->cfg_workqueue); 993 cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
994 NULL, 0, WLAN_STATUS_SUCCESS,
995 GFP_KERNEL);
996 dev_dbg(priv->adapter->dev,
997 "info: associated to bssid %pM successfully\n",
998 priv->cfg_bssid);
999 } else {
1000 dev_dbg(priv->adapter->dev,
1001 "info: association to bssid %pM failed\n",
1002 priv->cfg_bssid);
1003 memset(priv->cfg_bssid, 0, ETH_ALEN);
1004 }
1005
1004 return ret; 1006 return ret;
1005} 1007}
1006 1008
@@ -1017,28 +1019,29 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1017 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); 1019 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
1018 int ret = 0; 1020 int ret = 0;
1019 1021
1020 if (priv->ibss_join_request)
1021 return -EBUSY;
1022
1023 if (priv->bss_mode != NL80211_IFTYPE_ADHOC) { 1022 if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
1024 wiphy_err(wiphy, "request to join ibss received " 1023 wiphy_err(wiphy, "request to join ibss received "
1025 "when station is not in ibss mode\n"); 1024 "when station is not in ibss mode\n");
1026 goto done; 1025 goto done;
1027 } 1026 }
1028 1027
1029 priv->ibss_join_request = -EINPROGRESS;
1030
1031 wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n", 1028 wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
1032 (char *) params->ssid, params->bssid); 1029 (char *) params->ssid, params->bssid);
1033 1030
1034 ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid, 1031 ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
1035 params->bssid, priv->bss_mode, 1032 params->bssid, priv->bss_mode,
1036 params->channel, NULL, params->privacy); 1033 params->channel, NULL, params->privacy);
1037
1038 priv->ibss_join_request = 1;
1039done: 1034done:
1040 priv->ibss_join_result = ret; 1035 if (!ret) {
1041 queue_work(priv->workqueue, &priv->cfg_workqueue); 1036 cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
1037 dev_dbg(priv->adapter->dev,
1038 "info: joined/created adhoc network with bssid"
1039 " %pM successfully\n", priv->cfg_bssid);
1040 } else {
1041 dev_dbg(priv->adapter->dev,
1042 "info: failed creating/joining adhoc network\n");
1043 }
1044
1042 return ret; 1045 return ret;
1043} 1046}
1044 1047
@@ -1053,17 +1056,12 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
1053{ 1056{
1054 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); 1057 struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
1055 1058
1056 if (priv->disconnect)
1057 return -EBUSY;
1058
1059 priv->disconnect = 1;
1060
1061 wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n", 1059 wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
1062 priv->cfg_bssid); 1060 priv->cfg_bssid);
1063 if (mwifiex_deauthenticate(priv, NULL)) 1061 if (mwifiex_deauthenticate(priv, NULL))
1064 return -EFAULT; 1062 return -EFAULT;
1065 1063
1066 queue_work(priv->workqueue, &priv->cfg_workqueue); 1064 memset(priv->cfg_bssid, 0, ETH_ALEN);
1067 1065
1068 return 0; 1066 return 0;
1069} 1067}
@@ -1080,15 +1078,42 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
1080 struct cfg80211_scan_request *request) 1078 struct cfg80211_scan_request *request)
1081{ 1079{
1082 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1080 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
1081 int i;
1082 struct ieee80211_channel *chan;
1083 1083
1084 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); 1084 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
1085 1085
1086 if (priv->scan_request && priv->scan_request != request)
1087 return -EBUSY;
1088
1089 priv->scan_request = request; 1086 priv->scan_request = request;
1090 1087
1091 queue_work(priv->workqueue, &priv->cfg_workqueue); 1088 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
1089 GFP_KERNEL);
1090 if (!priv->user_scan_cfg) {
1091 dev_err(priv->adapter->dev, "failed to alloc scan_req\n");
1092 return -ENOMEM;
1093 }
1094 for (i = 0; i < request->n_ssids; i++) {
1095 memcpy(priv->user_scan_cfg->ssid_list[i].ssid,
1096 request->ssids[i].ssid, request->ssids[i].ssid_len);
1097 priv->user_scan_cfg->ssid_list[i].max_len =
1098 request->ssids[i].ssid_len;
1099 }
1100 for (i = 0; i < request->n_channels; i++) {
1101 chan = request->channels[i];
1102 priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
1103 priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
1104
1105 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
1106 priv->user_scan_cfg->chan_list[i].scan_type =
1107 MWIFIEX_SCAN_TYPE_PASSIVE;
1108 else
1109 priv->user_scan_cfg->chan_list[i].scan_type =
1110 MWIFIEX_SCAN_TYPE_ACTIVE;
1111
1112 priv->user_scan_cfg->chan_list[i].scan_time = 0;
1113 }
1114 if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
1115 return -EFAULT;
1116
1092 return 0; 1117 return 0;
1093} 1118}
1094 1119
@@ -1294,10 +1319,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
1294 1319
1295 priv->media_connected = false; 1320 priv->media_connected = false;
1296 1321
1297 cancel_work_sync(&priv->cfg_workqueue);
1298 flush_workqueue(priv->workqueue);
1299 destroy_workqueue(priv->workqueue);
1300
1301 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; 1322 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
1302 1323
1303 return 0; 1324 return 0;
@@ -1375,9 +1396,6 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
1375 memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN); 1396 memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
1376 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 1397 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1377 1398
1378 /* We are using custom domains */
1379 wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
1380
1381 /* Reserve space for bss band information */ 1399 /* Reserve space for bss band information */
1382 wdev->wiphy->bss_priv_size = sizeof(u8); 1400 wdev->wiphy->bss_priv_size = sizeof(u8);
1383 1401
@@ -1406,100 +1424,3 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
1406 1424
1407 return ret; 1425 return ret;
1408} 1426}
1409
1410/*
1411 * This function handles the result of different pending network operations.
1412 *
1413 * The following operations are handled and CFG802.11 subsystem is
1414 * notified accordingly -
1415 * - Scan request completion
1416 * - Association request completion
1417 * - IBSS join request completion
1418 * - Disconnect request completion
1419 */
1420void
1421mwifiex_cfg80211_results(struct work_struct *work)
1422{
1423 struct mwifiex_private *priv =
1424 container_of(work, struct mwifiex_private, cfg_workqueue);
1425 struct mwifiex_user_scan_cfg *scan_req;
1426 int ret = 0, i;
1427 struct ieee80211_channel *chan;
1428
1429 if (priv->scan_request) {
1430 scan_req = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
1431 GFP_KERNEL);
1432 if (!scan_req) {
1433 dev_err(priv->adapter->dev, "failed to alloc "
1434 "scan_req\n");
1435 return;
1436 }
1437 for (i = 0; i < priv->scan_request->n_ssids; i++) {
1438 memcpy(scan_req->ssid_list[i].ssid,
1439 priv->scan_request->ssids[i].ssid,
1440 priv->scan_request->ssids[i].ssid_len);
1441 scan_req->ssid_list[i].max_len =
1442 priv->scan_request->ssids[i].ssid_len;
1443 }
1444 for (i = 0; i < priv->scan_request->n_channels; i++) {
1445 chan = priv->scan_request->channels[i];
1446 scan_req->chan_list[i].chan_number = chan->hw_value;
1447 scan_req->chan_list[i].radio_type = chan->band;
1448 if (chan->flags & IEEE80211_CHAN_DISABLED)
1449 scan_req->chan_list[i].scan_type =
1450 MWIFIEX_SCAN_TYPE_PASSIVE;
1451 else
1452 scan_req->chan_list[i].scan_type =
1453 MWIFIEX_SCAN_TYPE_ACTIVE;
1454 scan_req->chan_list[i].scan_time = 0;
1455 }
1456 if (mwifiex_set_user_scan_ioctl(priv, scan_req))
1457 ret = -EFAULT;
1458 priv->scan_result_status = ret;
1459 dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n",
1460 __func__);
1461 cfg80211_scan_done(priv->scan_request,
1462 (priv->scan_result_status < 0));
1463 priv->scan_request = NULL;
1464 kfree(scan_req);
1465 }
1466
1467 if (priv->assoc_request == 1) {
1468 if (!priv->assoc_result) {
1469 cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
1470 NULL, 0, NULL, 0,
1471 WLAN_STATUS_SUCCESS,
1472 GFP_KERNEL);
1473 dev_dbg(priv->adapter->dev,
1474 "info: associated to bssid %pM successfully\n",
1475 priv->cfg_bssid);
1476 } else {
1477 dev_dbg(priv->adapter->dev,
1478 "info: association to bssid %pM failed\n",
1479 priv->cfg_bssid);
1480 memset(priv->cfg_bssid, 0, ETH_ALEN);
1481 }
1482 priv->assoc_request = 0;
1483 priv->assoc_result = 0;
1484 }
1485
1486 if (priv->ibss_join_request == 1) {
1487 if (!priv->ibss_join_result) {
1488 cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
1489 GFP_KERNEL);
1490 dev_dbg(priv->adapter->dev,
1491 "info: joined/created adhoc network with bssid"
1492 " %pM successfully\n", priv->cfg_bssid);
1493 } else {
1494 dev_dbg(priv->adapter->dev,
1495 "info: failed creating/joining adhoc network\n");
1496 }
1497 priv->ibss_join_request = 0;
1498 priv->ibss_join_result = 0;
1499 }
1500
1501 if (priv->disconnect) {
1502 memset(priv->cfg_bssid, 0, ETH_ALEN);
1503 priv->disconnect = 0;
1504 }
1505}
diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h
index 8d010f2500c5..76c76c60438b 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/mwifiex/cfg80211.h
@@ -26,5 +26,4 @@
26 26
27int mwifiex_register_cfg80211(struct mwifiex_private *); 27int mwifiex_register_cfg80211(struct mwifiex_private *);
28 28
29void mwifiex_cfg80211_results(struct work_struct *work);
30#endif 29#endif
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index ac278156d390..6e0a3eaecf70 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
939{ 939{
940 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL; 940 struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
941 unsigned long cmd_flags; 941 unsigned long cmd_flags;
942 unsigned long cmd_pending_q_flags;
943 unsigned long scan_pending_q_flags; 942 unsigned long scan_pending_q_flags;
944 uint16_t cancel_scan_cmd = false; 943 uint16_t cancel_scan_cmd = false;
945 944
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
949 cmd_node = adapter->curr_cmd; 948 cmd_node = adapter->curr_cmd;
950 cmd_node->wait_q_enabled = false; 949 cmd_node->wait_q_enabled = false;
951 cmd_node->cmd_flag |= CMD_F_CANCELED; 950 cmd_node->cmd_flag |= CMD_F_CANCELED;
952 spin_lock_irqsave(&adapter->cmd_pending_q_lock,
953 cmd_pending_q_flags);
954 list_del(&cmd_node->list);
955 spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
956 cmd_pending_q_flags);
957 mwifiex_insert_cmd_to_free_q(adapter, cmd_node); 951 mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
952 mwifiex_complete_cmd(adapter, adapter->curr_cmd);
953 adapter->curr_cmd = NULL;
958 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); 954 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
959 } 955 }
960 956
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
981 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); 977 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
982 } 978 }
983 adapter->cmd_wait_q.status = -1; 979 adapter->cmd_wait_q.status = -1;
984 mwifiex_complete_cmd(adapter, adapter->curr_cmd);
985} 980}
986 981
987/* 982/*
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 26940455255b..244c728ef9dc 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -283,6 +283,45 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
283} 283}
284 284
285/* 285/*
286 * This function sets trans_start per tx_queue
287 */
288void mwifiex_set_trans_start(struct net_device *dev)
289{
290 int i;
291
292 for (i = 0; i < dev->num_tx_queues; i++)
293 netdev_get_tx_queue(dev, i)->trans_start = jiffies;
294
295 dev->trans_start = jiffies;
296}
297
298/*
299 * This function wakes up all queues in net_device
300 */
301void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
302 struct mwifiex_adapter *adapter)
303{
304 unsigned long dev_queue_flags;
305
306 spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
307 netif_tx_wake_all_queues(netdev);
308 spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
309}
310
311/*
312 * This function stops all queues in net_device
313 */
314void mwifiex_stop_net_dev_queue(struct net_device *netdev,
315 struct mwifiex_adapter *adapter)
316{
317 unsigned long dev_queue_flags;
318
319 spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
320 netif_tx_stop_all_queues(netdev);
321 spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
322}
323
324/*
286 * This function releases the lock variables and frees the locks and 325 * This function releases the lock variables and frees the locks and
287 * associated locks. 326 * associated locks.
288 */ 327 */
@@ -359,6 +398,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
359 spin_lock_init(&adapter->int_lock); 398 spin_lock_init(&adapter->int_lock);
360 spin_lock_init(&adapter->main_proc_lock); 399 spin_lock_init(&adapter->main_proc_lock);
361 spin_lock_init(&adapter->mwifiex_cmd_lock); 400 spin_lock_init(&adapter->mwifiex_cmd_lock);
401 spin_lock_init(&adapter->queue_lock);
362 for (i = 0; i < adapter->priv_num; i++) { 402 for (i = 0; i < adapter->priv_num; i++) {
363 if (adapter->priv[i]) { 403 if (adapter->priv[i]) {
364 priv = adapter->priv[i]; 404 priv = adapter->priv[i];
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 67e6db7d672d..84be196188cc 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -401,7 +401,7 @@ mwifiex_fill_buffer(struct sk_buff *skb)
401static int 401static int
402mwifiex_open(struct net_device *dev) 402mwifiex_open(struct net_device *dev)
403{ 403{
404 netif_start_queue(dev); 404 netif_tx_start_all_queues(dev);
405 return 0; 405 return 0;
406} 406}
407 407
@@ -465,8 +465,8 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
465 atomic_inc(&priv->adapter->tx_pending); 465 atomic_inc(&priv->adapter->tx_pending);
466 466
467 if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) { 467 if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
468 netif_stop_queue(priv->netdev); 468 mwifiex_set_trans_start(dev);
469 dev->trans_start = jiffies; 469 mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
470 } 470 }
471 471
472 queue_work(priv->adapter->workqueue, &priv->adapter->main_work); 472 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
@@ -533,7 +533,7 @@ mwifiex_tx_timeout(struct net_device *dev)
533 533
534 dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n", 534 dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n",
535 jiffies, priv->bss_index); 535 jiffies, priv->bss_index);
536 dev->trans_start = jiffies; 536 mwifiex_set_trans_start(dev);
537 priv->num_tx_timeout++; 537 priv->num_tx_timeout++;
538} 538}
539 539
@@ -586,8 +586,6 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
586 priv->media_connected = false; 586 priv->media_connected = false;
587 memset(&priv->nick_name, 0, sizeof(priv->nick_name)); 587 memset(&priv->nick_name, 0, sizeof(priv->nick_name));
588 priv->num_tx_timeout = 0; 588 priv->num_tx_timeout = 0;
589 priv->workqueue = create_singlethread_workqueue("cfg80211_wq");
590 INIT_WORK(&priv->cfg_workqueue, mwifiex_cfg80211_results);
591 memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN); 589 memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
592} 590}
593 591
@@ -793,7 +791,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
793 priv = adapter->priv[i]; 791 priv = adapter->priv[i];
794 if (priv && priv->netdev) { 792 if (priv && priv->netdev) {
795 if (!netif_queue_stopped(priv->netdev)) 793 if (!netif_queue_stopped(priv->netdev))
796 netif_stop_queue(priv->netdev); 794 mwifiex_stop_net_dev_queue(priv->netdev,
795 adapter);
797 if (netif_carrier_ok(priv->netdev)) 796 if (netif_carrier_ok(priv->netdev))
798 netif_carrier_off(priv->netdev); 797 netif_carrier_off(priv->netdev);
799 } 798 }
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 3861a617c0e1..9207fc64641e 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -453,15 +453,8 @@ struct mwifiex_private {
453 u8 scan_pending_on_block; 453 u8 scan_pending_on_block;
454 u8 report_scan_result; 454 u8 report_scan_result;
455 struct cfg80211_scan_request *scan_request; 455 struct cfg80211_scan_request *scan_request;
456 int scan_result_status; 456 struct mwifiex_user_scan_cfg *user_scan_cfg;
457 int assoc_request;
458 u16 assoc_result;
459 int ibss_join_request;
460 u16 ibss_join_result;
461 bool disconnect;
462 u8 cfg_bssid[6]; 457 u8 cfg_bssid[6];
463 struct workqueue_struct *workqueue;
464 struct work_struct cfg_workqueue;
465 u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; 458 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
466 struct wps wps; 459 struct wps wps;
467 u8 scan_block; 460 u8 scan_block;
@@ -655,10 +648,19 @@ struct mwifiex_adapter {
655 struct mwifiex_wait_queue cmd_wait_q; 648 struct mwifiex_wait_queue cmd_wait_q;
656 u8 scan_wait_q_woken; 649 u8 scan_wait_q_woken;
657 struct cmd_ctrl_node *cmd_queued; 650 struct cmd_ctrl_node *cmd_queued;
651 spinlock_t queue_lock; /* lock for tx queues */
658}; 652};
659 653
660int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); 654int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
661 655
656void mwifiex_set_trans_start(struct net_device *dev);
657
658void mwifiex_stop_net_dev_queue(struct net_device *netdev,
659 struct mwifiex_adapter *adapter);
660
661void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
662 struct mwifiex_adapter *adapter);
663
662int mwifiex_init_fw(struct mwifiex_adapter *adapter); 664int mwifiex_init_fw(struct mwifiex_adapter *adapter);
663 665
664int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter); 666int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index a2f32008f9a8..405350940a45 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -386,7 +386,7 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
386 card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL); 386 card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
387 if (!card->txbd_ring_vbase) { 387 if (!card->txbd_ring_vbase) {
388 dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n"); 388 dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n");
389 return -1; 389 return -ENOMEM;
390 } 390 }
391 card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase); 391 card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase);
392 392
@@ -476,7 +476,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
476 if (!card->rxbd_ring_vbase) { 476 if (!card->rxbd_ring_vbase) {
477 dev_err(adapter->dev, "Unable to allocate buffer for " 477 dev_err(adapter->dev, "Unable to allocate buffer for "
478 "rxbd_ring.\n"); 478 "rxbd_ring.\n");
479 return -1; 479 return -ENOMEM;
480 } 480 }
481 card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase); 481 card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase);
482 482
@@ -569,7 +569,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
569 if (!card->evtbd_ring_vbase) { 569 if (!card->evtbd_ring_vbase) {
570 dev_err(adapter->dev, "Unable to allocate buffer. " 570 dev_err(adapter->dev, "Unable to allocate buffer. "
571 "Terminating download\n"); 571 "Terminating download\n");
572 return -1; 572 return -ENOMEM;
573 } 573 }
574 card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase); 574 card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase);
575 575
@@ -1231,15 +1231,13 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
1231 if (rdptr >= MWIFIEX_MAX_EVT_BD) { 1231 if (rdptr >= MWIFIEX_MAX_EVT_BD) {
1232 dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n", 1232 dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
1233 rdptr); 1233 rdptr);
1234 ret = -EINVAL; 1234 return -EINVAL;
1235 goto done;
1236 } 1235 }
1237 1236
1238 /* Read the event ring write pointer set by firmware */ 1237 /* Read the event ring write pointer set by firmware */
1239 if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) { 1238 if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
1240 dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n"); 1239 dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n");
1241 ret = -1; 1240 return -1;
1242 goto done;
1243 } 1241 }
1244 1242
1245 if (!card->evt_buf_list[rdptr]) { 1243 if (!card->evt_buf_list[rdptr]) {
@@ -1268,15 +1266,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
1268 /* Write the event ring read pointer in to REG_EVTBD_RDPTR */ 1266 /* Write the event ring read pointer in to REG_EVTBD_RDPTR */
1269 if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) { 1267 if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
1270 dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n"); 1268 dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n");
1271 ret = -1; 1269 return -1;
1272 goto done;
1273 } 1270 }
1274 1271
1275done:
1276 /* Free the buffer for failure case */
1277 if (ret && skb)
1278 dev_kfree_skb_any(skb);
1279
1280 dev_dbg(adapter->dev, "info: Check Events Again\n"); 1272 dev_dbg(adapter->dev, "info: Check Events Again\n");
1281 ret = mwifiex_pcie_process_event_ready(adapter); 1273 ret = mwifiex_pcie_process_event_ready(adapter);
1282 1274
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 8a18bcc23b26..e2e715666bca 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -819,8 +819,10 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv,
819 wildcard_ssid_tlv->header.len = cpu_to_le16( 819 wildcard_ssid_tlv->header.len = cpu_to_le16(
820 (u16) (ssid_len + sizeof(wildcard_ssid_tlv-> 820 (u16) (ssid_len + sizeof(wildcard_ssid_tlv->
821 max_ssid_length))); 821 max_ssid_length)));
822 wildcard_ssid_tlv->max_ssid_length = 822
823 user_scan_in->ssid_list[ssid_idx].max_len; 823 /* max_ssid_length = 0 tells firmware to perform
824 specific scan for the SSID filled */
825 wildcard_ssid_tlv->max_ssid_length = 0;
824 826
825 memcpy(wildcard_ssid_tlv->ssid, 827 memcpy(wildcard_ssid_tlv->ssid,
826 user_scan_in->ssid_list[ssid_idx].ssid, 828 user_scan_in->ssid_list[ssid_idx].ssid,
@@ -1389,11 +1391,8 @@ int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
1389{ 1391{
1390 int status; 1392 int status;
1391 1393
1392 priv->adapter->scan_wait_q_woken = false;
1393
1394 status = mwifiex_scan_networks(priv, scan_req); 1394 status = mwifiex_scan_networks(priv, scan_req);
1395 if (!status) 1395 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
1396 status = mwifiex_wait_queue_complete(priv->adapter);
1397 1396
1398 return status; 1397 return status;
1399} 1398}
@@ -1794,6 +1793,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1794 up(&priv->async_sem); 1793 up(&priv->async_sem);
1795 } 1794 }
1796 1795
1796 if (priv->user_scan_cfg) {
1797 dev_dbg(priv->adapter->dev, "info: %s: sending scan "
1798 "results\n", __func__);
1799 cfg80211_scan_done(priv->scan_request, 0);
1800 priv->scan_request = NULL;
1801 kfree(priv->user_scan_cfg);
1802 priv->user_scan_cfg = NULL;
1803 }
1797 } else { 1804 } else {
1798 /* Get scan command from scan_pending_q and put to 1805 /* Get scan command from scan_pending_q and put to
1799 cmd_pending_q */ 1806 cmd_pending_q */
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 702452b505c3..d39d8457f252 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -1087,7 +1087,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
1087 (adapter->ioport | 0x1000 | 1087 (adapter->ioport | 0x1000 |
1088 (card->mpa_rx.ports << 4)) + 1088 (card->mpa_rx.ports << 4)) +
1089 card->mpa_rx.start_port, 1)) 1089 card->mpa_rx.start_port, 1))
1090 return -1; 1090 goto error;
1091 1091
1092 curr_ptr = card->mpa_rx.buf; 1092 curr_ptr = card->mpa_rx.buf;
1093 1093
@@ -1130,12 +1130,29 @@ rx_curr_single:
1130 if (mwifiex_sdio_card_to_host(adapter, &pkt_type, 1130 if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
1131 skb->data, skb->len, 1131 skb->data, skb->len,
1132 adapter->ioport + port)) 1132 adapter->ioport + port))
1133 return -1; 1133 goto error;
1134 1134
1135 mwifiex_decode_rx_packet(adapter, skb, pkt_type); 1135 mwifiex_decode_rx_packet(adapter, skb, pkt_type);
1136 } 1136 }
1137 1137
1138 return 0; 1138 return 0;
1139
1140error:
1141 if (MP_RX_AGGR_IN_PROGRESS(card)) {
1142 /* Multiport-aggregation transfer failed - cleanup */
1143 for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
1144 /* copy pkt to deaggr buf */
1145 skb_deaggr = card->mpa_rx.skb_arr[pind];
1146 dev_kfree_skb_any(skb_deaggr);
1147 }
1148 MP_RX_AGGR_BUF_RESET(card);
1149 }
1150
1151 if (f_do_rx_cur)
1152 /* Single transfer pending. Free curr buff also */
1153 dev_kfree_skb_any(skb);
1154
1155 return -1;
1139} 1156}
1140 1157
1141/* 1158/*
@@ -1271,7 +1288,6 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
1271 1288
1272 dev_dbg(adapter->dev, 1289 dev_dbg(adapter->dev,
1273 "info: CFG reg val =%x\n", cr); 1290 "info: CFG reg val =%x\n", cr);
1274 dev_kfree_skb_any(skb);
1275 return -1; 1291 return -1;
1276 } 1292 }
1277 } 1293 }
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index f204810e8338..d7aa21da84d0 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -115,18 +115,17 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
115 if (adapter->num_cmd_timeout && adapter->curr_cmd) 115 if (adapter->num_cmd_timeout && adapter->curr_cmd)
116 return; 116 return;
117 priv->media_connected = false; 117 priv->media_connected = false;
118 if (!priv->disconnect) { 118 dev_dbg(adapter->dev, "info: successfully disconnected from"
119 priv->disconnect = 1; 119 " %pM: reason code %d\n", priv->cfg_bssid,
120 dev_dbg(adapter->dev, "info: successfully disconnected from" 120 WLAN_REASON_DEAUTH_LEAVING);
121 " %pM: reason code %d\n", priv->cfg_bssid, 121 if (priv->bss_mode == NL80211_IFTYPE_STATION) {
122 WLAN_REASON_DEAUTH_LEAVING); 122 cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING,
123 cfg80211_disconnected(priv->netdev, 123 NULL, 0, GFP_KERNEL);
124 WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
125 GFP_KERNEL);
126 queue_work(priv->workqueue, &priv->cfg_workqueue);
127 } 124 }
125 memset(priv->cfg_bssid, 0, ETH_ALEN);
126
128 if (!netif_queue_stopped(priv->netdev)) 127 if (!netif_queue_stopped(priv->netdev))
129 netif_stop_queue(priv->netdev); 128 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
130 if (netif_carrier_ok(priv->netdev)) 129 if (netif_carrier_ok(priv->netdev))
131 netif_carrier_off(priv->netdev); 130 netif_carrier_off(priv->netdev);
132 /* Reset wireless stats signal info */ 131 /* Reset wireless stats signal info */
@@ -201,7 +200,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
201 if (!netif_carrier_ok(priv->netdev)) 200 if (!netif_carrier_ok(priv->netdev))
202 netif_carrier_on(priv->netdev); 201 netif_carrier_on(priv->netdev);
203 if (netif_queue_stopped(priv->netdev)) 202 if (netif_queue_stopped(priv->netdev))
204 netif_wake_queue(priv->netdev); 203 mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
205 break; 204 break;
206 205
207 case EVENT_DEAUTHENTICATED: 206 case EVENT_DEAUTHENTICATED:
@@ -292,7 +291,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
292 priv->adhoc_is_link_sensed = false; 291 priv->adhoc_is_link_sensed = false;
293 mwifiex_clean_txrx(priv); 292 mwifiex_clean_txrx(priv);
294 if (!netif_queue_stopped(priv->netdev)) 293 if (!netif_queue_stopped(priv->netdev))
295 netif_stop_queue(priv->netdev); 294 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
296 if (netif_carrier_ok(priv->netdev)) 295 if (netif_carrier_ok(priv->netdev))
297 netif_carrier_off(priv->netdev); 296 netif_carrier_off(priv->netdev);
298 break; 297 break;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 4b6f5539657d..6d990c798a20 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -234,7 +234,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
234 "associating...\n"); 234 "associating...\n");
235 235
236 if (!netif_queue_stopped(priv->netdev)) 236 if (!netif_queue_stopped(priv->netdev))
237 netif_stop_queue(priv->netdev); 237 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
238 238
239 /* Clear any past association response stored for 239 /* Clear any past association response stored for
240 * application retrieval */ 240 * application retrieval */
@@ -265,7 +265,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
265 ret = mwifiex_check_network_compatibility(priv, bss_desc); 265 ret = mwifiex_check_network_compatibility(priv, bss_desc);
266 266
267 if (!netif_queue_stopped(priv->netdev)) 267 if (!netif_queue_stopped(priv->netdev))
268 netif_stop_queue(priv->netdev); 268 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
269 269
270 if (!ret) { 270 if (!ret) {
271 dev_dbg(adapter->dev, "info: network found in scan" 271 dev_dbg(adapter->dev, "info: network found in scan"
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index a206f412875f..d9274a1b77ac 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -134,7 +134,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
134 if (!priv) 134 if (!priv)
135 goto done; 135 goto done;
136 136
137 priv->netdev->trans_start = jiffies; 137 mwifiex_set_trans_start(priv->netdev);
138 if (!status) { 138 if (!status) {
139 priv->stats.tx_packets++; 139 priv->stats.tx_packets++;
140 priv->stats.tx_bytes += skb->len; 140 priv->stats.tx_bytes += skb->len;
@@ -152,7 +152,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
152 if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) 152 if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA)
153 && (tpriv->media_connected)) { 153 && (tpriv->media_connected)) {
154 if (netif_queue_stopped(tpriv->netdev)) 154 if (netif_queue_stopped(tpriv->netdev))
155 netif_wake_queue(tpriv->netdev); 155 mwifiex_wake_up_net_dev_queue(tpriv->netdev,
156 adapter);
156 } 157 }
157 } 158 }
158done: 159done:
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 527cf5333db5..4df8cf64b56c 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -6,6 +6,7 @@
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/firmware.h> 7#include <linux/firmware.h>
8#include <linux/device.h> 8#include <linux/device.h>
9#include <linux/module.h>
9 10
10#include "hermes.h" 11#include "hermes.h"
11#include "hermes_dld.h" 12#include "hermes_dld.h"
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 8b6f363b3f7d..fa8ce5104781 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -24,6 +24,7 @@
24 24
25#include <net/mac80211.h> 25#include <net/mac80211.h>
26#include <linux/crc-ccitt.h> 26#include <linux/crc-ccitt.h>
27#include <linux/export.h>
27 28
28#include "p54.h" 29#include "p54.h"
29#include "eeprom.h" 30#include "eeprom.h"
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 53a3408931be..18e82b31afa6 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -20,6 +20,7 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/firmware.h> 21#include <linux/firmware.h>
22#include <linux/etherdevice.h> 22#include <linux/etherdevice.h>
23#include <linux/export.h>
23 24
24#include <net/mac80211.h> 25#include <net/mac80211.h>
25 26
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index ad9ae04d07aa..db4d9a02f264 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -20,6 +20,7 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/firmware.h> 21#include <linux/firmware.h>
22#include <linux/etherdevice.h> 22#include <linux/etherdevice.h>
23#include <linux/module.h>
23 24
24#include <net/mac80211.h> 25#include <net/mac80211.h>
25 26
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1b753173680f..b1f51a215792 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -20,6 +20,7 @@
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/completion.h> 22#include <linux/completion.h>
23#include <linux/module.h>
23#include <net/mac80211.h> 24#include <net/mac80211.h>
24 25
25#include "p54.h" 26#include "p54.h"
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index a454d487b14f..7faed62c6378 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -584,8 +584,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
584 mutex_lock(&priv->mutex); 584 mutex_lock(&priv->mutex);
585 WARN_ON(priv->fw_state != FW_STATE_READY); 585 WARN_ON(priv->fw_state != FW_STATE_READY);
586 586
587 cancel_work_sync(&priv->work);
588
589 p54spi_power_off(priv); 587 p54spi_power_off(priv);
590 spin_lock_irqsave(&priv->tx_lock, flags); 588 spin_lock_irqsave(&priv->tx_lock, flags);
591 INIT_LIST_HEAD(&priv->tx_pending); 589 INIT_LIST_HEAD(&priv->tx_pending);
@@ -593,6 +591,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
593 591
594 priv->fw_state = FW_STATE_OFF; 592 priv->fw_state = FW_STATE_OFF;
595 mutex_unlock(&priv->mutex); 593 mutex_unlock(&priv->mutex);
594
595 cancel_work_sync(&priv->work);
596} 596}
597 597
598static int __devinit p54spi_probe(struct spi_device *spi) 598static int __devinit p54spi_probe(struct spi_device *spi)
@@ -652,6 +652,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
652 init_completion(&priv->fw_comp); 652 init_completion(&priv->fw_comp);
653 INIT_LIST_HEAD(&priv->tx_pending); 653 INIT_LIST_HEAD(&priv->tx_pending);
654 mutex_init(&priv->mutex); 654 mutex_init(&priv->mutex);
655 spin_lock_init(&priv->tx_lock);
655 SET_IEEE80211_DEV(hw, &spi->dev); 656 SET_IEEE80211_DEV(hw, &spi->dev);
656 priv->common.open = p54spi_op_start; 657 priv->common.open = p54spi_op_start;
657 priv->common.stop = p54spi_op_stop; 658 priv->common.stop = p54spi_op_stop;
@@ -699,7 +700,6 @@ static int __devexit p54spi_remove(struct spi_device *spi)
699static struct spi_driver p54spi_driver = { 700static struct spi_driver p54spi_driver = {
700 .driver = { 701 .driver = {
701 .name = "p54spi", 702 .name = "p54spi",
702 .bus = &spi_bus_type,
703 .owner = THIS_MODULE, 703 .owner = THIS_MODULE,
704 }, 704 },
705 705
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index a8f3bc740dfa..9b6096866427 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -20,6 +20,7 @@
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/crc32.h> 22#include <linux/crc32.h>
23#include <linux/module.h>
23#include <net/mac80211.h> 24#include <net/mac80211.h>
24 25
25#include "p54.h" 26#include "p54.h"
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f485784a60ae..42b97bc38477 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -16,6 +16,7 @@
16 * published by the Free Software Foundation. 16 * published by the Free Software Foundation.
17 */ 17 */
18 18
19#include <linux/export.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/firmware.h> 21#include <linux/firmware.h>
21#include <linux/etherdevice.h> 22#include <linux/etherdevice.h>
@@ -241,7 +242,7 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
241 242
242 skb_unlink(skb, &priv->tx_queue); 243 skb_unlink(skb, &priv->tx_queue);
243 p54_tx_qos_accounting_free(priv, skb); 244 p54_tx_qos_accounting_free(priv, skb);
244 dev_kfree_skb_any(skb); 245 ieee80211_free_txskb(dev, skb);
245} 246}
246EXPORT_SYMBOL_GPL(p54_free_skb); 247EXPORT_SYMBOL_GPL(p54_free_skb);
247 248
@@ -787,7 +788,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
787 &hdr_flags, &aid, &burst_allowed); 788 &hdr_flags, &aid, &burst_allowed);
788 789
789 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { 790 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
790 dev_kfree_skb_any(skb); 791 ieee80211_free_txskb(dev, skb);
791 return; 792 return;
792 } 793 }
793 794
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index d97a2caf582b..4e44b1af119a 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
778 dwrq->flags = 0; 778 dwrq->flags = 0;
779 dwrq->length = 0; 779 dwrq->length = 0;
780 } 780 }
781 essid->octets[essid->length] = '\0'; 781 essid->octets[dwrq->length] = '\0';
782 memcpy(extra, essid->octets, dwrq->length); 782 memcpy(extra, essid->octets, dwrq->length);
783 kfree(essid); 783 kfree(essid);
784 784
@@ -2493,323 +2493,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
2493 return ret; 2493 return ret;
2494} 2494}
2495 2495
2496/* Note: currently, use hostapd ioctl from the Host AP driver for WPA
2497 * support. This is to be replaced with Linux wireless extensions once they
2498 * get WPA support. */
2499
2500/* Note II: please leave all this together as it will be easier to remove later,
2501 * once wireless extensions add WPA support -mcgrof */
2502
2503/* PRISM54_HOSTAPD ioctl() cmd: */
2504enum {
2505 PRISM2_SET_ENCRYPTION = 6,
2506 PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
2507 PRISM2_HOSTAPD_MLME = 13,
2508 PRISM2_HOSTAPD_SCAN_REQ = 14,
2509};
2510
2511#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12 2496#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12
2512#define PRISM54_HOSTAPD SIOCIWFIRSTPRIV+25
2513#define PRISM54_DROP_UNENCRYPTED SIOCIWFIRSTPRIV+26
2514
2515#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
2516#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
2517 offsetof(struct prism2_hostapd_param, u.generic_elem.data)
2518
2519/* Maximum length for algorithm names (-1 for nul termination)
2520 * used in ioctl() */
2521#define HOSTAP_CRYPT_ALG_NAME_LEN 16
2522
2523struct prism2_hostapd_param {
2524 u32 cmd;
2525 u8 sta_addr[ETH_ALEN];
2526 union {
2527 struct {
2528 u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
2529 u32 flags;
2530 u32 err;
2531 u8 idx;
2532 u8 seq[8]; /* sequence counter (set: RX, get: TX) */
2533 u16 key_len;
2534 u8 key[0];
2535 } crypt;
2536 struct {
2537 u8 len;
2538 u8 data[0];
2539 } generic_elem;
2540 struct {
2541#define MLME_STA_DEAUTH 0
2542#define MLME_STA_DISASSOC 1
2543 u16 cmd;
2544 u16 reason_code;
2545 } mlme;
2546 struct {
2547 u8 ssid_len;
2548 u8 ssid[32];
2549 } scan_req;
2550 } u;
2551};
2552
2553
2554static int
2555prism2_ioctl_set_encryption(struct net_device *dev,
2556 struct prism2_hostapd_param *param,
2557 int param_len)
2558{
2559 islpci_private *priv = netdev_priv(dev);
2560 int rvalue = 0, force = 0;
2561 int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
2562 union oid_res_t r;
2563
2564 /* with the new API, it's impossible to get a NULL pointer.
2565 * New version of iwconfig set the IW_ENCODE_NOKEY flag
2566 * when no key is given, but older versions don't. */
2567
2568 if (param->u.crypt.key_len > 0) {
2569 /* we have a key to set */
2570 int index = param->u.crypt.idx;
2571 int current_index;
2572 struct obj_key key = { DOT11_PRIV_TKIP, 0, "" };
2573
2574 /* get the current key index */
2575 rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
2576 current_index = r.u;
2577 /* Verify that the key is not marked as invalid */
2578 if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) {
2579 key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ?
2580 sizeof (param->u.crypt.key) : param->u.crypt.key_len;
2581 memcpy(key.key, param->u.crypt.key, key.length);
2582 if (key.length == 32)
2583 /* we want WPA-PSK */
2584 key.type = DOT11_PRIV_TKIP;
2585 if ((index < 0) || (index > 3))
2586 /* no index provided use the current one */
2587 index = current_index;
2588
2589 /* now send the key to the card */
2590 rvalue |=
2591 mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
2592 &key);
2593 }
2594 /*
2595 * If a valid key is set, encryption should be enabled
2596 * (user may turn it off later).
2597 * This is also how "iwconfig ethX key on" works
2598 */
2599 if ((index == current_index) && (key.length > 0))
2600 force = 1;
2601 } else {
2602 int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1;
2603 if ((index >= 0) && (index <= 3)) {
2604 /* we want to set the key index */
2605 rvalue |=
2606 mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
2607 &index);
2608 } else {
2609 if (!(param->u.crypt.flags & IW_ENCODE_MODE)) {
2610 /* we cannot do anything. Complain. */
2611 return -EINVAL;
2612 }
2613 }
2614 }
2615 /* now read the flags */
2616 if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
2617 /* Encoding disabled,
2618 * authen = DOT11_AUTH_OS;
2619 * invoke = 0;
2620 * exunencrypt = 0; */
2621 }
2622 if (param->u.crypt.flags & IW_ENCODE_OPEN)
2623 /* Encode but accept non-encoded packets. No auth */
2624 invoke = 1;
2625 if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) {
2626 /* Refuse non-encoded packets. Auth */
2627 authen = DOT11_AUTH_BOTH;
2628 invoke = 1;
2629 exunencrypt = 1;
2630 }
2631 /* do the change if requested */
2632 if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) {
2633 rvalue |=
2634 mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
2635 rvalue |=
2636 mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
2637 rvalue |=
2638 mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
2639 &exunencrypt);
2640 }
2641 return rvalue;
2642}
2643
2644static int
2645prism2_ioctl_set_generic_element(struct net_device *ndev,
2646 struct prism2_hostapd_param *param,
2647 int param_len)
2648{
2649 islpci_private *priv = netdev_priv(ndev);
2650 int max_len, len, alen, ret=0;
2651 struct obj_attachment *attach;
2652
2653 len = param->u.generic_elem.len;
2654 max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
2655 if (max_len < 0 || max_len < len)
2656 return -EINVAL;
2657
2658 alen = sizeof(*attach) + len;
2659 attach = kzalloc(alen, GFP_KERNEL);
2660 if (attach == NULL)
2661 return -ENOMEM;
2662
2663#define WLAN_FC_TYPE_MGMT 0
2664#define WLAN_FC_STYPE_ASSOC_REQ 0
2665#define WLAN_FC_STYPE_REASSOC_REQ 2
2666
2667 /* Note: endianness is covered by mgt_set_varlen */
2668
2669 attach->type = (WLAN_FC_TYPE_MGMT << 2) |
2670 (WLAN_FC_STYPE_ASSOC_REQ << 4);
2671 attach->id = -1;
2672 attach->size = len;
2673 memcpy(attach->data, param->u.generic_elem.data, len);
2674
2675 ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
2676
2677 if (ret == 0) {
2678 attach->type = (WLAN_FC_TYPE_MGMT << 2) |
2679 (WLAN_FC_STYPE_REASSOC_REQ << 4);
2680
2681 ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
2682
2683 if (ret == 0)
2684 printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
2685 ndev->name);
2686 }
2687
2688 kfree(attach);
2689 return ret;
2690
2691}
2692
2693static int
2694prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param)
2695{
2696 return -EOPNOTSUPP;
2697}
2698
2699static int
2700prism2_ioctl_scan_req(struct net_device *ndev,
2701 struct prism2_hostapd_param *param)
2702{
2703 islpci_private *priv = netdev_priv(ndev);
2704 struct iw_request_info info;
2705 int i, rvalue;
2706 struct obj_bsslist *bsslist;
2707 u32 noise = 0;
2708 char *extra = "";
2709 char *current_ev = "foo";
2710 union oid_res_t r;
2711
2712 if (islpci_get_state(priv) < PRV_STATE_INIT) {
2713 /* device is not ready, fail gently */
2714 return 0;
2715 }
2716
2717 /* first get the noise value. We will use it to report the link quality */
2718 rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
2719 noise = r.u;
2720
2721 /* Ask the device for a list of known bss. We can report at most
2722 * IW_MAX_AP=64 to the range struct. But the device won't repport anything
2723 * if you change the value of IWMAX_BSS=24.
2724 */
2725 rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
2726 bsslist = r.ptr;
2727
2728 info.cmd = PRISM54_HOSTAPD;
2729 info.flags = 0;
2730
2731 /* ok now, scan the list and translate its info */
2732 for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++)
2733 current_ev = prism54_translate_bss(ndev, &info, current_ev,
2734 extra + IW_SCAN_MAX_DATA,
2735 &(bsslist->bsslist[i]),
2736 noise);
2737 kfree(bsslist);
2738
2739 return rvalue;
2740}
2741
2742static int
2743prism54_hostapd(struct net_device *ndev, struct iw_point *p)
2744{
2745 struct prism2_hostapd_param *param;
2746 int ret = 0;
2747 u32 uwrq;
2748
2749 printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length);
2750 if (p->length < sizeof(struct prism2_hostapd_param) ||
2751 p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
2752 return -EINVAL;
2753
2754 param = memdup_user(p->pointer, p->length);
2755 if (IS_ERR(param))
2756 return PTR_ERR(param);
2757
2758 switch (param->cmd) {
2759 case PRISM2_SET_ENCRYPTION:
2760 printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n",
2761 ndev->name);
2762 ret = prism2_ioctl_set_encryption(ndev, param, p->length);
2763 break;
2764 case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
2765 printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n",
2766 ndev->name);
2767 ret = prism2_ioctl_set_generic_element(ndev, param,
2768 p->length);
2769 break;
2770 case PRISM2_HOSTAPD_MLME:
2771 printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n",
2772 ndev->name);
2773 ret = prism2_ioctl_mlme(ndev, param);
2774 break;
2775 case PRISM2_HOSTAPD_SCAN_REQ:
2776 printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n",
2777 ndev->name);
2778 ret = prism2_ioctl_scan_req(ndev, param);
2779 break;
2780 case PRISM54_SET_WPA:
2781 printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n",
2782 ndev->name);
2783 uwrq = 1;
2784 ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL);
2785 break;
2786 case PRISM54_DROP_UNENCRYPTED:
2787 printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n",
2788 ndev->name);
2789#if 0
2790 uwrq = 0x01;
2791 mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq);
2792 down_write(&priv->mib_sem);
2793 mgt_commit(priv);
2794 up_write(&priv->mib_sem);
2795#endif
2796 /* Not necessary, as set_wpa does it, should we just do it here though? */
2797 ret = 0;
2798 break;
2799 default:
2800 printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n",
2801 ndev->name);
2802 ret = -EOPNOTSUPP;
2803 break;
2804 }
2805
2806 if (ret == 0 && copy_to_user(p->pointer, param, p->length))
2807 ret = -EFAULT;
2808
2809 kfree(param);
2810
2811 return ret;
2812}
2813 2497
2814static int 2498static int
2815prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info, 2499prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
@@ -3223,20 +2907,3 @@ const struct iw_handler_def prism54_handler_def = {
3223 .private_args = (struct iw_priv_args *) prism54_private_args, 2907 .private_args = (struct iw_priv_args *) prism54_private_args,
3224 .get_wireless_stats = prism54_get_wireless_stats, 2908 .get_wireless_stats = prism54_get_wireless_stats,
3225}; 2909};
3226
3227/* For wpa_supplicant */
3228
3229int
3230prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
3231{
3232 struct iwreq *wrq = (struct iwreq *) rq;
3233 int ret = -1;
3234 switch (cmd) {
3235 case PRISM54_HOSTAPD:
3236 if (!capable(CAP_NET_ADMIN))
3237 return -EPERM;
3238 ret = prism54_hostapd(ndev, &wrq->u.data);
3239 return ret;
3240 }
3241 return -EOPNOTSUPP;
3242}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index bcfbfb9281d2..a34bceb6e3cd 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -43,8 +43,6 @@ void prism54_wpa_bss_ie_clean(islpci_private *priv);
43 43
44int prism54_set_mac_address(struct net_device *, void *); 44int prism54_set_mac_address(struct net_device *, void *);
45 45
46int prism54_ioctl(struct net_device *, struct ifreq *, int);
47
48extern const struct iw_handler_def prism54_handler_def; 46extern const struct iw_handler_def prism54_handler_def;
49 47
50#endif /* _ISL_IOCTL_H */ 48#endif /* _ISL_IOCTL_H */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 8a3cf4fe376f..5970ff6f40cc 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -804,7 +804,6 @@ static const struct ethtool_ops islpci_ethtool_ops = {
804static const struct net_device_ops islpci_netdev_ops = { 804static const struct net_device_ops islpci_netdev_ops = {
805 .ndo_open = islpci_open, 805 .ndo_open = islpci_open,
806 .ndo_stop = islpci_close, 806 .ndo_stop = islpci_close,
807 .ndo_do_ioctl = prism54_ioctl,
808 .ndo_start_xmit = islpci_eth_transmit, 807 .ndo_start_xmit = islpci_eth_transmit,
809 .ndo_tx_timeout = islpci_eth_tx_timeout, 808 .ndo_tx_timeout = islpci_eth_tx_timeout,
810 .ndo_set_mac_address = prism54_set_mac_address, 809 .ndo_set_mac_address = prism54_set_mac_address,
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 620e3c0e88e0..3802c31fefcd 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -244,6 +244,10 @@ enum ndis_80211_power_mode {
244 NDIS_80211_POWER_MODE_FAST_PSP, 244 NDIS_80211_POWER_MODE_FAST_PSP,
245}; 245};
246 246
247enum ndis_80211_pmkid_cand_list_flag_bits {
248 NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0)
249};
250
247struct ndis_80211_auth_request { 251struct ndis_80211_auth_request {
248 __le32 length; 252 __le32 length;
249 u8 bssid[6]; 253 u8 bssid[6];
@@ -387,19 +391,17 @@ struct ndis_80211_capability {
387struct ndis_80211_bssid_info { 391struct ndis_80211_bssid_info {
388 u8 bssid[6]; 392 u8 bssid[6];
389 u8 pmkid[16]; 393 u8 pmkid[16];
390}; 394} __packed;
391 395
392struct ndis_80211_pmkid { 396struct ndis_80211_pmkid {
393 __le32 length; 397 __le32 length;
394 __le32 bssid_info_count; 398 __le32 bssid_info_count;
395 struct ndis_80211_bssid_info bssid_info[0]; 399 struct ndis_80211_bssid_info bssid_info[0];
396}; 400} __packed;
397 401
398/* 402/*
399 * private data 403 * private data
400 */ 404 */
401#define NET_TYPE_11FB 0
402
403#define CAP_MODE_80211A 1 405#define CAP_MODE_80211A 1
404#define CAP_MODE_80211B 2 406#define CAP_MODE_80211B 2
405#define CAP_MODE_80211G 4 407#define CAP_MODE_80211G 4
@@ -1347,6 +1349,32 @@ static int set_channel(struct usbnet *usbdev, int channel)
1347 return ret; 1349 return ret;
1348} 1350}
1349 1351
1352static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
1353 u16 *beacon_interval)
1354{
1355 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1356 struct ieee80211_channel *channel;
1357 struct ndis_80211_conf config;
1358 int len, ret;
1359
1360 /* Get channel and beacon interval */
1361 len = sizeof(config);
1362 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
1363 netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
1364 __func__, ret);
1365 if (ret < 0)
1366 return NULL;
1367
1368 channel = ieee80211_get_channel(priv->wdev.wiphy,
1369 KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
1370 if (!channel)
1371 return NULL;
1372
1373 if (beacon_interval)
1374 *beacon_interval = le16_to_cpu(config.beacon_period);
1375 return channel;
1376}
1377
1350/* index must be 0 - N, as per NDIS */ 1378/* index must be 0 - N, as per NDIS */
1351static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len, 1379static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
1352 int index) 1380 int index)
@@ -2650,13 +2678,12 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
2650{ 2678{
2651 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 2679 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2652 struct ieee80211_channel *channel; 2680 struct ieee80211_channel *channel;
2653 struct ndis_80211_conf config;
2654 struct ndis_80211_ssid ssid; 2681 struct ndis_80211_ssid ssid;
2655 struct cfg80211_bss *bss; 2682 struct cfg80211_bss *bss;
2656 s32 signal; 2683 s32 signal;
2657 u64 timestamp; 2684 u64 timestamp;
2658 u16 capability; 2685 u16 capability;
2659 u16 beacon_interval; 2686 u16 beacon_interval = 0;
2660 __le32 rssi; 2687 __le32 rssi;
2661 u8 ie_buf[34]; 2688 u8 ie_buf[34];
2662 int len, ret, ie_len; 2689 int len, ret, ie_len;
@@ -2681,22 +2708,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
2681 } 2708 }
2682 2709
2683 /* Get channel and beacon interval */ 2710 /* Get channel and beacon interval */
2684 len = sizeof(config); 2711 channel = get_current_channel(usbdev, &beacon_interval);
2685 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); 2712 if (!channel) {
2686 netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n", 2713 netdev_warn(usbdev->net, "%s(): could not get channel.\n",
2687 __func__, ret); 2714 __func__);
2688 if (ret >= 0) {
2689 beacon_interval = le16_to_cpu(config.beacon_period);
2690 channel = ieee80211_get_channel(priv->wdev.wiphy,
2691 KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
2692 if (!channel) {
2693 netdev_warn(usbdev->net, "%s(): could not get channel."
2694 "\n", __func__);
2695 return;
2696 }
2697 } else {
2698 netdev_warn(usbdev->net, "%s(): could not get configuration.\n",
2699 __func__);
2700 return; 2715 return;
2701 } 2716 }
2702 2717
@@ -2841,8 +2856,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2841 req_ie_len, resp_ie, 2856 req_ie_len, resp_ie,
2842 resp_ie_len, 0, GFP_KERNEL); 2857 resp_ie_len, 0, GFP_KERNEL);
2843 else 2858 else
2844 cfg80211_roamed(usbdev->net, NULL, bssid, 2859 cfg80211_roamed(usbdev->net,
2845 req_ie, req_ie_len, 2860 get_current_channel(usbdev, NULL),
2861 bssid, req_ie, req_ie_len,
2846 resp_ie, resp_ie_len, GFP_KERNEL); 2862 resp_ie, resp_ie_len, GFP_KERNEL);
2847 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) 2863 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
2848 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); 2864 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
@@ -3008,25 +3024,13 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
3008 for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) { 3024 for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) {
3009 struct ndis_80211_pmkid_candidate *cand = 3025 struct ndis_80211_pmkid_candidate *cand =
3010 &cand_list->candidate_list[i]; 3026 &cand_list->candidate_list[i];
3027 bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH);
3011 3028
3012 netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n", 3029 netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n",
3013 i, le32_to_cpu(cand->flags), cand->bssid); 3030 i, le32_to_cpu(cand->flags), preauth, cand->bssid);
3014
3015#if 0
3016 struct iw_pmkid_cand pcand;
3017 union iwreq_data wrqu;
3018 3031
3019 memset(&pcand, 0, sizeof(pcand)); 3032 cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid,
3020 if (le32_to_cpu(cand->flags) & 0x01) 3033 preauth, GFP_ATOMIC);
3021 pcand.flags |= IW_PMKID_CAND_PREAUTH;
3022 pcand.index = i;
3023 memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN);
3024
3025 memset(&wrqu, 0, sizeof(wrqu));
3026 wrqu.data.length = sizeof(pcand);
3027 wireless_send_event(usbdev->net, IWEVPMKIDCAND, &wrqu,
3028 (u8 *)&pcand);
3029#endif
3030 } 3034 }
3031} 3035}
3032 3036
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 3f183a15186e..e5df380d4fbe 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1203,8 +1203,10 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
1203 !(filter_flags & FIF_CONTROL)); 1203 !(filter_flags & FIF_CONTROL));
1204 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL, 1204 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
1205 !(filter_flags & FIF_PSPOLL)); 1205 !(filter_flags & FIF_PSPOLL));
1206 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1); 1206 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA,
1207 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0); 1207 !(filter_flags & FIF_CONTROL));
1208 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR,
1209 !(filter_flags & FIF_CONTROL));
1208 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL, 1210 rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
1209 !(filter_flags & FIF_CONTROL)); 1211 !(filter_flags & FIF_CONTROL));
1210 rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg); 1212 rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
@@ -3771,7 +3773,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
3771 /* Apparently the data is read from end to start */ 3773 /* Apparently the data is read from end to start */
3772 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg); 3774 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
3773 /* The returned value is in CPU order, but eeprom is le */ 3775 /* The returned value is in CPU order, but eeprom is le */
3774 rt2x00dev->eeprom[i] = cpu_to_le32(reg); 3776 *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
3775 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg); 3777 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
3776 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); 3778 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
3777 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg); 3779 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index f1565792f270..377876315b8d 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -919,6 +919,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
919 { USB_DEVICE(0x050d, 0x935b) }, 919 { USB_DEVICE(0x050d, 0x935b) },
920 /* Buffalo */ 920 /* Buffalo */
921 { USB_DEVICE(0x0411, 0x00e8) }, 921 { USB_DEVICE(0x0411, 0x00e8) },
922 { USB_DEVICE(0x0411, 0x0158) },
922 { USB_DEVICE(0x0411, 0x016f) }, 923 { USB_DEVICE(0x0411, 0x016f) },
923 { USB_DEVICE(0x0411, 0x01a2) }, 924 { USB_DEVICE(0x0411, 0x01a2) },
924 /* Corega */ 925 /* Corega */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 2ec5c00235e6..99ff12d0c29d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -943,6 +943,7 @@ struct rt2x00_dev {
943 * Powersaving work 943 * Powersaving work
944 */ 944 */
945 struct delayed_work autowakeup_work; 945 struct delayed_work autowakeup_work;
946 struct work_struct sleep_work;
946 947
947 /* 948 /*
948 * Data queue arrays for RX, TX, Beacon and ATIM. 949 * Data queue arrays for RX, TX, Beacon and ATIM.
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index e1fb2a8569be..c3e1aa7c1a80 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -465,6 +465,23 @@ static u8 *rt2x00lib_find_ie(u8 *data, unsigned int len, u8 ie)
465 return NULL; 465 return NULL;
466} 466}
467 467
468static void rt2x00lib_sleep(struct work_struct *work)
469{
470 struct rt2x00_dev *rt2x00dev =
471 container_of(work, struct rt2x00_dev, sleep_work);
472
473 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
474 return;
475
476 /*
477 * Check again is powersaving is enabled, to prevent races from delayed
478 * work execution.
479 */
480 if (!test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
481 rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf,
482 IEEE80211_CONF_CHANGE_PS);
483}
484
468static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev, 485static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
469 struct sk_buff *skb, 486 struct sk_buff *skb,
470 struct rxdone_entry_desc *rxdesc) 487 struct rxdone_entry_desc *rxdesc)
@@ -512,8 +529,7 @@ static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
512 cam |= (tim_ie->bitmap_ctrl & 0x01); 529 cam |= (tim_ie->bitmap_ctrl & 0x01);
513 530
514 if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags)) 531 if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags))
515 rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, 532 queue_work(rt2x00dev->workqueue, &rt2x00dev->sleep_work);
516 IEEE80211_CONF_CHANGE_PS);
517} 533}
518 534
519static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, 535static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
@@ -815,11 +831,11 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
815 if (spec->supported_rates & SUPPORT_RATE_OFDM) 831 if (spec->supported_rates & SUPPORT_RATE_OFDM)
816 num_rates += 8; 832 num_rates += 8;
817 833
818 channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL); 834 channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL);
819 if (!channels) 835 if (!channels)
820 return -ENOMEM; 836 return -ENOMEM;
821 837
822 rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL); 838 rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL);
823 if (!rates) 839 if (!rates)
824 goto exit_free_channels; 840 goto exit_free_channels;
825 841
@@ -1141,6 +1157,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1141 1157
1142 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); 1158 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
1143 INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup); 1159 INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup);
1160 INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep);
1144 1161
1145 /* 1162 /*
1146 * Let the driver probe the device to detect the capabilities. 1163 * Let the driver probe the device to detect the capabilities.
@@ -1197,6 +1214,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1197 */ 1214 */
1198 cancel_work_sync(&rt2x00dev->intf_work); 1215 cancel_work_sync(&rt2x00dev->intf_work);
1199 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); 1216 cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
1217 cancel_work_sync(&rt2x00dev->sleep_work);
1200 if (rt2x00_is_usb(rt2x00dev)) { 1218 if (rt2x00_is_usb(rt2x00dev)) {
1201 del_timer_sync(&rt2x00dev->txstatus_timer); 1219 del_timer_sync(&rt2x00dev->txstatus_timer);
1202 cancel_work_sync(&rt2x00dev->rxdone_work); 1220 cancel_work_sync(&rt2x00dev->rxdone_work);
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index bf0acff07807..ede3c58e6783 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -160,7 +160,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
160 exit_fail: 160 exit_fail:
161 rt2x00queue_pause_queue(queue); 161 rt2x00queue_pause_queue(queue);
162 exit_free_skb: 162 exit_free_skb:
163 dev_kfree_skb_any(skb); 163 ieee80211_free_txskb(hw, skb);
164} 164}
165EXPORT_SYMBOL_GPL(rt2x00mac_tx); 165EXPORT_SYMBOL_GPL(rt2x00mac_tx);
166 166
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 0082015ff664..2f14a5fb0cbb 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/etherdevice.h> 23#include <linux/etherdevice.h>
24#include <linux/eeprom_93cx6.h> 24#include <linux/eeprom_93cx6.h>
25#include <linux/module.h>
25#include <net/mac80211.h> 26#include <net/mac80211.h>
26 27
27#include "rtl8180.h" 28#include "rtl8180.h"
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 24873b55b55c..4a78f9e39dfa 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -26,6 +26,7 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/etherdevice.h> 27#include <linux/etherdevice.h>
28#include <linux/eeprom_93cx6.h> 28#include <linux/eeprom_93cx6.h>
29#include <linux/module.h>
29#include <net/mac80211.h> 30#include <net/mac80211.h>
30 31
31#include "rtl8187.h" 32#include "rtl8187.h"
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 45e14760c16e..d6c42e69bdbd 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -12,7 +12,7 @@ config RTL8192CE
12 12
13config RTL8192SE 13config RTL8192SE
14 tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter" 14 tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
15 depends on MAC80211 && EXPERIMENTAL 15 depends on MAC80211 && EXPERIMENTAL && PCI
16 select FW_LOADER 16 select FW_LOADER
17 select RTLWIFI 17 select RTLWIFI
18 ---help--- 18 ---help---
@@ -23,7 +23,7 @@ config RTL8192SE
23 23
24config RTL8192DE 24config RTL8192DE
25 tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter" 25 tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
26 depends on MAC80211 && EXPERIMENTAL 26 depends on MAC80211 && EXPERIMENTAL && PCI
27 select FW_LOADER 27 select FW_LOADER
28 select RTLWIFI 28 select RTLWIFI
29 ---help--- 29 ---help---
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index d4fdd2a5a739..d81a6021a30f 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -30,6 +30,7 @@
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 31
32#include <linux/ip.h> 32#include <linux/ip.h>
33#include <linux/module.h>
33#include "wifi.h" 34#include "wifi.h"
34#include "rc.h" 35#include "rc.h"
35#include "base.h" 36#include "base.h"
@@ -344,9 +345,9 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
344 if (is_valid_ether_addr(rtlefuse->dev_addr)) { 345 if (is_valid_ether_addr(rtlefuse->dev_addr)) {
345 SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr); 346 SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
346 } else { 347 } else {
347 u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 }; 348 u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
348 get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1); 349 get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1);
349 SET_IEEE80211_PERM_ADDR(hw, rtlmac); 350 SET_IEEE80211_PERM_ADDR(hw, rtlmac1);
350 } 351 }
351 352
352} 353}
@@ -447,12 +448,11 @@ int rtl_init_core(struct ieee80211_hw *hw)
447 448
448 /* <4> locks */ 449 /* <4> locks */
449 mutex_init(&rtlpriv->locks.conf_mutex); 450 mutex_init(&rtlpriv->locks.conf_mutex);
450 spin_lock_init(&rtlpriv->locks.ips_lock); 451 mutex_init(&rtlpriv->locks.ps_mutex);
451 spin_lock_init(&rtlpriv->locks.irq_th_lock); 452 spin_lock_init(&rtlpriv->locks.irq_th_lock);
452 spin_lock_init(&rtlpriv->locks.h2c_lock); 453 spin_lock_init(&rtlpriv->locks.h2c_lock);
453 spin_lock_init(&rtlpriv->locks.rf_ps_lock); 454 spin_lock_init(&rtlpriv->locks.rf_ps_lock);
454 spin_lock_init(&rtlpriv->locks.rf_lock); 455 spin_lock_init(&rtlpriv->locks.rf_lock);
455 spin_lock_init(&rtlpriv->locks.lps_lock);
456 spin_lock_init(&rtlpriv->locks.waitq_lock); 456 spin_lock_init(&rtlpriv->locks.waitq_lock);
457 spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock); 457 spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
458 458
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 4ae905983d0d..f66b5757f6b9 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -76,7 +76,7 @@ enum ap_peer {
76 SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val) 76 SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
77 77
78#define SET_80211_PS_POLL_AID(_hdr, _val) \ 78#define SET_80211_PS_POLL_AID(_hdr, _val) \
79 (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val)) 79 (*(u16 *)((u8 *)(_hdr) + 2) = _val)
80#define SET_80211_PS_POLL_BSSID(_hdr, _val) \ 80#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
81 memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN) 81 memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
82#define SET_80211_PS_POLL_TA(_hdr, _val) \ 82#define SET_80211_PS_POLL_TA(_hdr, _val) \
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 7babb6acd957..dc36d7461caa 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -29,6 +29,7 @@
29 29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 31
32#include <linux/export.h>
32#include "wifi.h" 33#include "wifi.h"
33#include "cam.h" 34#include "cam.h"
34 35
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 3fc21f60bb04..ed1058b71587 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -27,6 +27,7 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <linux/export.h>
30#include "wifi.h" 31#include "wifi.h"
31#include "efuse.h" 32#include "efuse.h"
32 33
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 177a8e669241..0d4d242849b4 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -27,6 +27,7 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <linux/export.h>
30#include "core.h" 31#include "core.h"
31#include "wifi.h" 32#include "wifi.h"
32#include "pci.h" 33#include "pci.h"
@@ -609,7 +610,7 @@ tx_status_ok:
609 if (((rtlpriv->link_info.num_rx_inperiod + 610 if (((rtlpriv->link_info.num_rx_inperiod +
610 rtlpriv->link_info.num_tx_inperiod) > 8) || 611 rtlpriv->link_info.num_tx_inperiod) > 8) ||
611 (rtlpriv->link_info.num_rx_inperiod > 2)) { 612 (rtlpriv->link_info.num_rx_inperiod > 2)) {
612 tasklet_schedule(&rtlpriv->works.ips_leave_tasklet); 613 schedule_work(&rtlpriv->works.lps_leave_work);
613 } 614 }
614} 615}
615 616
@@ -735,7 +736,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
735 if (((rtlpriv->link_info.num_rx_inperiod + 736 if (((rtlpriv->link_info.num_rx_inperiod +
736 rtlpriv->link_info.num_tx_inperiod) > 8) || 737 rtlpriv->link_info.num_tx_inperiod) > 8) ||
737 (rtlpriv->link_info.num_rx_inperiod > 2)) { 738 (rtlpriv->link_info.num_rx_inperiod > 2)) {
738 tasklet_schedule(&rtlpriv->works.ips_leave_tasklet); 739 schedule_work(&rtlpriv->works.lps_leave_work);
739 } 740 }
740 741
741 dev_kfree_skb_any(skb); 742 dev_kfree_skb_any(skb);
@@ -779,6 +780,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
779 unsigned long flags; 780 unsigned long flags;
780 u32 inta = 0; 781 u32 inta = 0;
781 u32 intb = 0; 782 u32 intb = 0;
783 irqreturn_t ret = IRQ_HANDLED;
782 784
783 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); 785 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
784 786
@@ -786,8 +788,10 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
786 rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb); 788 rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
787 789
788 /*Shared IRQ or HW disappared */ 790 /*Shared IRQ or HW disappared */
789 if (!inta || inta == 0xffff) 791 if (!inta || inta == 0xffff) {
792 ret = IRQ_NONE;
790 goto done; 793 goto done;
794 }
791 795
792 /*<1> beacon related */ 796 /*<1> beacon related */
793 if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) { 797 if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
@@ -889,12 +893,9 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
889 if (rtlpriv->rtlhal.earlymode_enable) 893 if (rtlpriv->rtlhal.earlymode_enable)
890 tasklet_schedule(&rtlpriv->works.irq_tasklet); 894 tasklet_schedule(&rtlpriv->works.irq_tasklet);
891 895
892 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
893 return IRQ_HANDLED;
894
895done: 896done:
896 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); 897 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
897 return IRQ_HANDLED; 898 return ret;
898} 899}
899 900
900static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw) 901static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
@@ -902,11 +903,6 @@ static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
902 _rtl_pci_tx_chk_waitq(hw); 903 _rtl_pci_tx_chk_waitq(hw);
903} 904}
904 905
905static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw)
906{
907 rtl_lps_leave(hw);
908}
909
910static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) 906static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
911{ 907{
912 struct rtl_priv *rtlpriv = rtl_priv(hw); 908 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -944,6 +940,15 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
944 return; 940 return;
945} 941}
946 942
943static void rtl_lps_leave_work_callback(struct work_struct *work)
944{
945 struct rtl_works *rtlworks =
946 container_of(work, struct rtl_works, lps_leave_work);
947 struct ieee80211_hw *hw = rtlworks->hw;
948
949 rtl_lps_leave(hw);
950}
951
947static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw) 952static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
948{ 953{
949 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 954 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1005,9 +1010,7 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
1005 tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet, 1010 tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
1006 (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet, 1011 (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
1007 (unsigned long)hw); 1012 (unsigned long)hw);
1008 tasklet_init(&rtlpriv->works.ips_leave_tasklet, 1013 INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback);
1009 (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet,
1010 (unsigned long)hw);
1011} 1014}
1012 1015
1013static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, 1016static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
@@ -1477,7 +1480,7 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
1477 1480
1478 synchronize_irq(rtlpci->pdev->irq); 1481 synchronize_irq(rtlpci->pdev->irq);
1479 tasklet_kill(&rtlpriv->works.irq_tasklet); 1482 tasklet_kill(&rtlpriv->works.irq_tasklet);
1480 tasklet_kill(&rtlpriv->works.ips_leave_tasklet); 1483 cancel_work_sync(&rtlpriv->works.lps_leave_work);
1481 1484
1482 flush_workqueue(rtlpriv->works.rtl_wq); 1485 flush_workqueue(rtlpriv->works.rtl_wq);
1483 destroy_workqueue(rtlpriv->works.rtl_wq); 1486 destroy_workqueue(rtlpriv->works.rtl_wq);
@@ -1552,7 +1555,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1552 set_hal_stop(rtlhal); 1555 set_hal_stop(rtlhal);
1553 1556
1554 rtlpriv->cfg->ops->disable_interrupt(hw); 1557 rtlpriv->cfg->ops->disable_interrupt(hw);
1555 tasklet_kill(&rtlpriv->works.ips_leave_tasklet); 1558 cancel_work_sync(&rtlpriv->works.lps_leave_work);
1556 1559
1557 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags); 1560 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1558 while (ppsc->rfchange_inprogress) { 1561 while (ppsc->rfchange_inprogress) {
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index a693feffbe72..a14a68b24635 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -27,6 +27,7 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <linux/export.h>
30#include "wifi.h" 31#include "wifi.h"
31#include "base.h" 32#include "base.h"
32#include "ps.h" 33#include "ps.h"
@@ -240,7 +241,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
240 if (mac->opmode != NL80211_IFTYPE_STATION) 241 if (mac->opmode != NL80211_IFTYPE_STATION)
241 return; 242 return;
242 243
243 spin_lock(&rtlpriv->locks.ips_lock); 244 mutex_lock(&rtlpriv->locks.ps_mutex);
244 245
245 if (ppsc->inactiveps) { 246 if (ppsc->inactiveps) {
246 rtstate = ppsc->rfpwr_state; 247 rtstate = ppsc->rfpwr_state;
@@ -256,7 +257,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
256 } 257 }
257 } 258 }
258 259
259 spin_unlock(&rtlpriv->locks.ips_lock); 260 mutex_unlock(&rtlpriv->locks.ps_mutex);
260} 261}
261 262
262/*for FW LPS*/ 263/*for FW LPS*/
@@ -394,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
394 if (mac->link_state != MAC80211_LINKED) 395 if (mac->link_state != MAC80211_LINKED)
395 return; 396 return;
396 397
397 spin_lock(&rtlpriv->locks.lps_lock); 398 mutex_lock(&rtlpriv->locks.ps_mutex);
398 399
399 /* Idle for a while if we connect to AP a while ago. */ 400 /* Idle for a while if we connect to AP a while ago. */
400 if (mac->cnt_after_linked >= 2) { 401 if (mac->cnt_after_linked >= 2) {
@@ -406,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
406 } 407 }
407 } 408 }
408 409
409 spin_unlock(&rtlpriv->locks.lps_lock); 410 mutex_unlock(&rtlpriv->locks.ps_mutex);
410} 411}
411 412
412/*Leave the leisure power save mode.*/ 413/*Leave the leisure power save mode.*/
@@ -416,7 +417,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
416 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 417 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
417 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 418 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
418 419
419 spin_lock(&rtlpriv->locks.lps_lock); 420 mutex_lock(&rtlpriv->locks.ps_mutex);
420 421
421 if (ppsc->fwctrl_lps) { 422 if (ppsc->fwctrl_lps) {
422 if (ppsc->dot11_psmode != EACTIVE) { 423 if (ppsc->dot11_psmode != EACTIVE) {
@@ -437,7 +438,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
437 rtl_lps_set_psmode(hw, EACTIVE); 438 rtl_lps_set_psmode(hw, EACTIVE);
438 } 439 }
439 } 440 }
440 spin_unlock(&rtlpriv->locks.lps_lock); 441 mutex_unlock(&rtlpriv->locks.ps_mutex);
441} 442}
442 443
443/* For sw LPS*/ 444/* For sw LPS*/
@@ -538,9 +539,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
538 RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); 539 RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
539 } 540 }
540 541
541 spin_lock(&rtlpriv->locks.lps_lock); 542 mutex_lock(&rtlpriv->locks.ps_mutex);
542 rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); 543 rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
543 spin_unlock(&rtlpriv->locks.lps_lock); 544 mutex_unlock(&rtlpriv->locks.ps_mutex);
544} 545}
545 546
546void rtl_swlps_rfon_wq_callback(void *data) 547void rtl_swlps_rfon_wq_callback(void *data)
@@ -573,9 +574,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
573 if (rtlpriv->link_info.busytraffic) 574 if (rtlpriv->link_info.busytraffic)
574 return; 575 return;
575 576
576 spin_lock(&rtlpriv->locks.lps_lock); 577 mutex_lock(&rtlpriv->locks.ps_mutex);
577 rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); 578 rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
578 spin_unlock(&rtlpriv->locks.lps_lock); 579 mutex_unlock(&rtlpriv->locks.ps_mutex);
579 580
580 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && 581 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
581 !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { 582 !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index a00774e7090d..72a98cab6f69 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -27,6 +27,7 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <linux/export.h>
30#include "dm_common.h" 31#include "dm_common.h"
31#include "phy_common.h" 32#include "phy_common.h"
32#include "../pci.h" 33#include "../pci.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 49a064bdbce6..931d97979b04 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -30,6 +30,7 @@
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 31
32#include <linux/firmware.h> 32#include <linux/firmware.h>
33#include <linux/export.h>
33#include "../wifi.h" 34#include "../wifi.h"
34#include "../pci.h" 35#include "../pci.h"
35#include "../base.h" 36#include "../base.h"
@@ -72,6 +73,34 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
72 } 73 }
73} 74}
74 75
76static void rtl_block_fw_writeN(struct ieee80211_hw *hw, const u8 *buffer,
77 u32 size)
78{
79 struct rtl_priv *rtlpriv = rtl_priv(hw);
80 u32 blockSize = REALTEK_USB_VENQT_MAX_BUF_SIZE - 20;
81 u8 *bufferPtr = (u8 *) buffer;
82 u32 i, offset, blockCount, remainSize;
83
84 blockCount = size / blockSize;
85 remainSize = size % blockSize;
86
87 for (i = 0; i < blockCount; i++) {
88 offset = i * blockSize;
89 rtlpriv->io.writeN_sync(rtlpriv,
90 (FW_8192C_START_ADDRESS + offset),
91 (void *)(bufferPtr + offset),
92 blockSize);
93 }
94
95 if (remainSize) {
96 offset = blockCount * blockSize;
97 rtlpriv->io.writeN_sync(rtlpriv,
98 (FW_8192C_START_ADDRESS + offset),
99 (void *)(bufferPtr + offset),
100 remainSize);
101 }
102}
103
75static void _rtl92c_fw_block_write(struct ieee80211_hw *hw, 104static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
76 const u8 *buffer, u32 size) 105 const u8 *buffer, u32 size)
77{ 106{
@@ -80,23 +109,30 @@ static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
80 u8 *bufferPtr = (u8 *) buffer; 109 u8 *bufferPtr = (u8 *) buffer;
81 u32 *pu4BytePtr = (u32 *) buffer; 110 u32 *pu4BytePtr = (u32 *) buffer;
82 u32 i, offset, blockCount, remainSize; 111 u32 i, offset, blockCount, remainSize;
112 u32 data;
83 113
114 if (rtlpriv->io.writeN_sync) {
115 rtl_block_fw_writeN(hw, buffer, size);
116 return;
117 }
84 blockCount = size / blockSize; 118 blockCount = size / blockSize;
85 remainSize = size % blockSize; 119 remainSize = size % blockSize;
120 if (remainSize) {
121 /* the last word is < 4 bytes - pad it with zeros */
122 for (i = 0; i < 4 - remainSize; i++)
123 *(bufferPtr + size + i) = 0;
124 blockCount++;
125 }
86 126
87 for (i = 0; i < blockCount; i++) { 127 for (i = 0; i < blockCount; i++) {
88 offset = i * blockSize; 128 offset = i * blockSize;
129 /* for big-endian platforms, the firmware data need to be byte
130 * swapped as it was read as a byte string and will be written
131 * as 32-bit dwords and byte swapped when written
132 */
133 data = le32_to_cpu(*(__le32 *)(pu4BytePtr + i));
89 rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), 134 rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
90 *(pu4BytePtr + i)); 135 data);
91 }
92
93 if (remainSize) {
94 offset = blockCount * blockSize;
95 bufferPtr += offset;
96 for (i = 0; i < remainSize; i++) {
97 rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
98 offset + i), *(bufferPtr + i));
99 }
100 } 136 }
101} 137}
102 138
@@ -226,10 +262,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
226 u32 fwsize; 262 u32 fwsize;
227 enum version_8192c version = rtlhal->version; 263 enum version_8192c version = rtlhal->version;
228 264
229 pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
230 if (!rtlhal->pfirmware) 265 if (!rtlhal->pfirmware)
231 return 1; 266 return 1;
232 267
268 pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
233 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; 269 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
234 pfwdata = (u8 *) rtlhal->pfirmware; 270 pfwdata = (u8 *) rtlhal->pfirmware;
235 fwsize = rtlhal->fwsize; 271 fwsize = rtlhal->fwsize;
@@ -237,8 +273,9 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
237 if (IS_FW_HEADER_EXIST(pfwheader)) { 273 if (IS_FW_HEADER_EXIST(pfwheader)) {
238 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, 274 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
239 ("Firmware Version(%d), Signature(%#x),Size(%d)\n", 275 ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
240 pfwheader->version, pfwheader->signature, 276 le16_to_cpu(pfwheader->version),
241 (uint)sizeof(struct rtl92c_firmware_header))); 277 le16_to_cpu(pfwheader->signature),
278 (uint)sizeof(struct rtl92c_firmware_header)));
242 279
243 pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); 280 pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
244 fwsize = fwsize - sizeof(struct rtl92c_firmware_header); 281 fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 3d5823c12621..cec5a3a1cc53 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -32,32 +32,32 @@
32 32
33#define FW_8192C_SIZE 0x3000 33#define FW_8192C_SIZE 0x3000
34#define FW_8192C_START_ADDRESS 0x1000 34#define FW_8192C_START_ADDRESS 0x1000
35#define FW_8192C_END_ADDRESS 0x3FFF 35#define FW_8192C_END_ADDRESS 0x1FFF
36#define FW_8192C_PAGE_SIZE 4096 36#define FW_8192C_PAGE_SIZE 4096
37#define FW_8192C_POLLING_DELAY 5 37#define FW_8192C_POLLING_DELAY 5
38#define FW_8192C_POLLING_TIMEOUT_COUNT 100 38#define FW_8192C_POLLING_TIMEOUT_COUNT 100
39 39
40#define IS_FW_HEADER_EXIST(_pfwhdr) \ 40#define IS_FW_HEADER_EXIST(_pfwhdr) \
41 ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\ 41 ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
42 (_pfwhdr->signature&0xFFF0) == 0x88C0) 42 (le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x88C0)
43 43
44struct rtl92c_firmware_header { 44struct rtl92c_firmware_header {
45 u16 signature; 45 __le16 signature;
46 u8 category; 46 u8 category;
47 u8 function; 47 u8 function;
48 u16 version; 48 __le16 version;
49 u8 subversion; 49 u8 subversion;
50 u8 rsvd1; 50 u8 rsvd1;
51 u8 month; 51 u8 month;
52 u8 date; 52 u8 date;
53 u8 hour; 53 u8 hour;
54 u8 minute; 54 u8 minute;
55 u16 ramcodeSize; 55 __le16 ramcodeSize;
56 u16 rsvd2; 56 __le16 rsvd2;
57 u32 svnindex; 57 __le32 svnindex;
58 u32 rsvd3; 58 __le32 rsvd3;
59 u32 rsvd4; 59 __le32 rsvd4;
60 u32 rsvd5; 60 __le32 rsvd5;
61}; 61};
62 62
63enum rtl8192c_h2c_cmd { 63enum rtl8192c_h2c_cmd {
@@ -94,5 +94,6 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
94void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); 94void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
95void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); 95void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
96void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); 96void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
97void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
97 98
98#endif 99#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
index 2f624fc27499..605ff191aeb7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/main.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/main.c
@@ -27,6 +27,7 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <linux/module.h>
30#include "../wifi.h" 31#include "../wifi.h"
31 32
32 33
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 3b11642d3f7d..1f07558debf2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -27,6 +27,7 @@
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <linux/export.h>
30#include "../wifi.h" 31#include "../wifi.h"
31#include "../rtl8192ce/reg.h" 32#include "../rtl8192ce/reg.h"
32#include "../rtl8192ce/def.h" 33#include "../rtl8192ce/def.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 592a10ac5929..3b585aadabfc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
569 } 569 }
570 case ERFSLEEP:{ 570 case ERFSLEEP:{
571 if (ppsc->rfpwr_state == ERFOFF) 571 if (ppsc->rfpwr_state == ERFOFF)
572 break; 572 return false;
573 for (queue_id = 0, i = 0; 573 for (queue_id = 0, i = 0;
574 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 574 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
575 ring = &pcipriv->dev.tx_ring[queue_id]; 575 ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index a48404cc2b96..f2aa33dc4d78 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -28,6 +28,7 @@
28 *****************************************************************************/ 28 *****************************************************************************/
29 29
30#include <linux/vmalloc.h> 30#include <linux/vmalloc.h>
31#include <linux/module.h>
31 32
32#include "../wifi.h" 33#include "../wifi.h"
33#include "../core.h" 34#include "../core.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 814c05df51e8..4ed973a3aa17 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -498,7 +498,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
498 } 498 }
499 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"), 499 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
500 hwinfo, HWSET_MAX_SIZE); 500 hwinfo, HWSET_MAX_SIZE);
501 eeprom_id = *((u16 *)&hwinfo[0]); 501 eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
502 if (eeprom_id != RTL8190_EEPROM_ID) { 502 if (eeprom_id != RTL8190_EEPROM_ID) {
503 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 503 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
504 ("EEPROM ID(%#x) is invalid!!\n", eeprom_id)); 504 ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
@@ -516,13 +516,14 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
516 pr_info("MAC address: %pM\n", rtlefuse->dev_addr); 516 pr_info("MAC address: %pM\n", rtlefuse->dev_addr);
517 _rtl92cu_read_txpower_info_from_hwpg(hw, 517 _rtl92cu_read_txpower_info_from_hwpg(hw,
518 rtlefuse->autoload_failflag, hwinfo); 518 rtlefuse->autoload_failflag, hwinfo);
519 rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; 519 rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]);
520 rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; 520 rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
521 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 521 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
522 (" VID = 0x%02x PID = 0x%02x\n", 522 (" VID = 0x%02x PID = 0x%02x\n",
523 rtlefuse->eeprom_vid, rtlefuse->eeprom_did)); 523 rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
524 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 524 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
525 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; 525 rtlefuse->eeprom_version =
526 le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
526 rtlefuse->txpwr_fromeprom = true; 527 rtlefuse->txpwr_fromeprom = true;
527 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 528 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
528 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 529 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 060a06f4a885..9e0c8fcdf90f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -84,6 +84,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
84 } 84 }
85 } 85 }
86 rtlhal->version = (enum version_8192c)chip_version; 86 rtlhal->version = (enum version_8192c)chip_version;
87 pr_info("rtl8192cu: Chip version 0x%x\n", chip_version);
87 switch (rtlhal->version) { 88 switch (rtlhal->version) {
88 case VERSION_NORMAL_TSMC_CHIP_92C_1T2R: 89 case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
89 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 90 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 72852900df84..e49cf2244c75 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
548 break; 548 break;
549 case ERFSLEEP: 549 case ERFSLEEP:
550 if (ppsc->rfpwr_state == ERFOFF) 550 if (ppsc->rfpwr_state == ERFOFF)
551 break; 551 return false;
552 for (queue_id = 0, i = 0; 552 for (queue_id = 0, i = 0;
553 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 553 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
554 ring = &pcipriv->dev.tx_ring[queue_id]; 554 ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index b9a158e5eb0e..94a3e1706158 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -42,6 +42,7 @@
42#include "led.h" 42#include "led.h"
43#include "hw.h" 43#include "hw.h"
44#include <linux/vmalloc.h> 44#include <linux/vmalloc.h>
45#include <linux/module.h>
45 46
46MODULE_AUTHOR("Georgia <georgia@realtek.com>"); 47MODULE_AUTHOR("Georgia <georgia@realtek.com>");
47MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>"); 48MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index bc33b147f44f..b3cc7b949992 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -491,7 +491,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
491 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0); 491 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
492 for (index = 0; index < 16; index++) 492 for (index = 0; index < 16; index++)
493 checksum = checksum ^ (*(ptr + index)); 493 checksum = checksum ^ (*(ptr + index));
494 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum); 494 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum));
495} 495}
496 496
497void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 497void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 3ac7af1c5509..0883349e1c83 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
3374 break; 3374 break;
3375 case ERFSLEEP: 3375 case ERFSLEEP:
3376 if (ppsc->rfpwr_state == ERFOFF) 3376 if (ppsc->rfpwr_state == ERFOFF)
3377 break; 3377 return false;
3378 3378
3379 for (queue_id = 0, i = 0; 3379 for (queue_id = 0, i = 0;
3380 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 3380 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 691f80092185..149493f4c25c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -30,6 +30,7 @@
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 31
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/module.h>
33 34
34#include "../wifi.h" 35#include "../wifi.h"
35#include "../core.h" 36#include "../core.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index f27171af979c..f10ac1ad9087 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
602 } 602 }
603 case ERFSLEEP: 603 case ERFSLEEP:
604 if (ppsc->rfpwr_state == ERFOFF) 604 if (ppsc->rfpwr_state == ERFOFF)
605 break; 605 return false;
606 606
607 for (queue_id = 0, i = 0; 607 for (queue_id = 0, i = 0;
608 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { 608 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 3ec9a0d41baf..92f49d522c56 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -30,6 +30,7 @@
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 31
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/module.h>
33 34
34#include "../wifi.h" 35#include "../wifi.h"
35#include "../core.h" 36#include "../core.h"
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index b42c2e2b2055..e956fa71d040 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -28,18 +28,20 @@
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29 29
30#include <linux/usb.h> 30#include <linux/usb.h>
31#include <linux/export.h>
31#include "core.h" 32#include "core.h"
32#include "wifi.h" 33#include "wifi.h"
33#include "usb.h" 34#include "usb.h"
34#include "base.h" 35#include "base.h"
35#include "ps.h" 36#include "ps.h"
37#include "rtl8192c/fw_common.h"
36 38
37#define REALTEK_USB_VENQT_READ 0xC0 39#define REALTEK_USB_VENQT_READ 0xC0
38#define REALTEK_USB_VENQT_WRITE 0x40 40#define REALTEK_USB_VENQT_WRITE 0x40
39#define REALTEK_USB_VENQT_CMD_REQ 0x05 41#define REALTEK_USB_VENQT_CMD_REQ 0x05
40#define REALTEK_USB_VENQT_CMD_IDX 0x00 42#define REALTEK_USB_VENQT_CMD_IDX 0x00
41 43
42#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254 44#define MAX_USBCTRL_VENDORREQ_TIMES 10
43 45
44static void usbctrl_async_callback(struct urb *urb) 46static void usbctrl_async_callback(struct urb *urb)
45{ 47{
@@ -81,6 +83,7 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
81 dr->wValue = cpu_to_le16(value); 83 dr->wValue = cpu_to_le16(value);
82 dr->wIndex = cpu_to_le16(index); 84 dr->wIndex = cpu_to_le16(index);
83 dr->wLength = cpu_to_le16(len); 85 dr->wLength = cpu_to_le16(len);
86 /* data are already in little-endian order */
84 memcpy(buf, pdata, len); 87 memcpy(buf, pdata, len);
85 usb_fill_control_urb(urb, udev, pipe, 88 usb_fill_control_urb(urb, udev, pipe,
86 (unsigned char *)dr, buf, len, 89 (unsigned char *)dr, buf, len,
@@ -99,16 +102,28 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
99 unsigned int pipe; 102 unsigned int pipe;
100 int status; 103 int status;
101 u8 reqtype; 104 u8 reqtype;
105 int vendorreq_times = 0;
106 static int count;
102 107
103 pipe = usb_rcvctrlpipe(udev, 0); /* read_in */ 108 pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
104 reqtype = REALTEK_USB_VENQT_READ; 109 reqtype = REALTEK_USB_VENQT_READ;
105 110
106 status = usb_control_msg(udev, pipe, request, reqtype, value, index, 111 do {
107 pdata, len, 0); /* max. timeout */ 112 status = usb_control_msg(udev, pipe, request, reqtype, value,
113 index, pdata, len, 0); /*max. timeout*/
114 if (status < 0) {
115 /* firmware download is checksumed, don't retry */
116 if ((value >= FW_8192C_START_ADDRESS &&
117 value <= FW_8192C_END_ADDRESS))
118 break;
119 } else {
120 break;
121 }
122 } while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
108 123
109 if (status < 0) 124 if (status < 0 && count++ < 4)
110 pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n", 125 pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
111 value, status, *(u32 *)pdata); 126 value, status, le32_to_cpu(*(u32 *)pdata));
112 return status; 127 return status;
113} 128}
114 129
@@ -128,7 +143,7 @@ static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
128 143
129 wvalue = (u16)addr; 144 wvalue = (u16)addr;
130 _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len); 145 _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
131 ret = *data; 146 ret = le32_to_cpu(*data);
132 kfree(data); 147 kfree(data);
133 return ret; 148 return ret;
134} 149}
@@ -160,12 +175,12 @@ static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
160 u8 request; 175 u8 request;
161 u16 wvalue; 176 u16 wvalue;
162 u16 index; 177 u16 index;
163 u32 data; 178 __le32 data;
164 179
165 request = REALTEK_USB_VENQT_CMD_REQ; 180 request = REALTEK_USB_VENQT_CMD_REQ;
166 index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */ 181 index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
167 wvalue = (u16)(addr&0x0000ffff); 182 wvalue = (u16)(addr&0x0000ffff);
168 data = val; 183 data = cpu_to_le32(val);
169 _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data, 184 _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
170 len); 185 len);
171} 186}
@@ -191,6 +206,30 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
191 _usb_write_async(to_usb_device(dev), addr, val, 4); 206 _usb_write_async(to_usb_device(dev), addr, val, 4);
192} 207}
193 208
209static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
210 u16 len)
211{
212 struct device *dev = rtlpriv->io.dev;
213 struct usb_device *udev = to_usb_device(dev);
214 u8 request = REALTEK_USB_VENQT_CMD_REQ;
215 u8 reqtype = REALTEK_USB_VENQT_WRITE;
216 u16 wvalue;
217 u16 index = REALTEK_USB_VENQT_CMD_IDX;
218 int pipe = usb_sndctrlpipe(udev, 0); /* write_out */
219 u8 *buffer;
220 dma_addr_t dma_addr;
221
222 wvalue = (u16)(addr&0x0000ffff);
223 buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr);
224 if (!buffer)
225 return;
226 memcpy(buffer, data, len);
227 usb_control_msg(udev, pipe, request, reqtype, wvalue,
228 index, buffer, len, 50);
229
230 usb_free_coherent(udev, (size_t)len, buffer, dma_addr);
231}
232
194static void _rtl_usb_io_handler_init(struct device *dev, 233static void _rtl_usb_io_handler_init(struct device *dev,
195 struct ieee80211_hw *hw) 234 struct ieee80211_hw *hw)
196{ 235{
@@ -204,6 +243,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
204 rtlpriv->io.read8_sync = _usb_read8_sync; 243 rtlpriv->io.read8_sync = _usb_read8_sync;
205 rtlpriv->io.read16_sync = _usb_read16_sync; 244 rtlpriv->io.read16_sync = _usb_read16_sync;
206 rtlpriv->io.read32_sync = _usb_read32_sync; 245 rtlpriv->io.read32_sync = _usb_read32_sync;
246 rtlpriv->io.writeN_sync = _usb_writeN_sync;
207} 247}
208 248
209static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw) 249static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 713c7ddba8eb..085dccdbd1b6 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -63,6 +63,7 @@
63#define AC_MAX 4 63#define AC_MAX 4
64#define QOS_QUEUE_NUM 4 64#define QOS_QUEUE_NUM 4
65#define RTL_MAC80211_NUM_QUEUE 5 65#define RTL_MAC80211_NUM_QUEUE 5
66#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
66 67
67#define QBSS_LOAD_SIZE 5 68#define QBSS_LOAD_SIZE 5
68#define MAX_WMMELE_LENGTH 64 69#define MAX_WMMELE_LENGTH 64
@@ -943,8 +944,10 @@ struct rtl_io {
943 unsigned long pci_base_addr; /*device I/O address */ 944 unsigned long pci_base_addr; /*device I/O address */
944 945
945 void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val); 946 void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
946 void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, __le16 val); 947 void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
947 void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, __le32 val); 948 void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
949 void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf,
950 u16 len);
948 951
949 u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr); 952 u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
950 u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr); 953 u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
@@ -1541,14 +1544,13 @@ struct rtl_hal_cfg {
1541struct rtl_locks { 1544struct rtl_locks {
1542 /* mutex */ 1545 /* mutex */
1543 struct mutex conf_mutex; 1546 struct mutex conf_mutex;
1547 struct mutex ps_mutex;
1544 1548
1545 /*spin lock */ 1549 /*spin lock */
1546 spinlock_t ips_lock;
1547 spinlock_t irq_th_lock; 1550 spinlock_t irq_th_lock;
1548 spinlock_t h2c_lock; 1551 spinlock_t h2c_lock;
1549 spinlock_t rf_ps_lock; 1552 spinlock_t rf_ps_lock;
1550 spinlock_t rf_lock; 1553 spinlock_t rf_lock;
1551 spinlock_t lps_lock;
1552 spinlock_t waitq_lock; 1554 spinlock_t waitq_lock;
1553 1555
1554 /*Dual mac*/ 1556 /*Dual mac*/
@@ -1573,7 +1575,8 @@ struct rtl_works {
1573 /* For SW LPS */ 1575 /* For SW LPS */
1574 struct delayed_work ps_work; 1576 struct delayed_work ps_work;
1575 struct delayed_work ps_rfon_wq; 1577 struct delayed_work ps_rfon_wq;
1576 struct tasklet_struct ips_leave_tasklet; 1578
1579 struct work_struct lps_leave_work;
1577}; 1580};
1578 1581
1579struct rtl_debug { 1582struct rtl_debug {
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index eaa5f9556200..6248c354fc5c 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -319,7 +319,6 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
319static struct spi_driver wl1251_spi_driver = { 319static struct spi_driver wl1251_spi_driver = {
320 .driver = { 320 .driver = {
321 .name = DRIVER_NAME, 321 .name = DRIVER_NAME,
322 .bus = &spi_bus_type,
323 .owner = THIS_MODULE, 322 .owner = THIS_MODULE,
324 }, 323 },
325 324
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 3fe388b87c2e..af08c8609c63 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -42,16 +42,6 @@ config WL12XX_SDIO
42 If you choose to build a module, it'll be called wl12xx_sdio. 42 If you choose to build a module, it'll be called wl12xx_sdio.
43 Say N if unsure. 43 Say N if unsure.
44 44
45config WL12XX_SDIO_TEST
46 tristate "TI wl12xx SDIO testing support"
47 depends on WL12XX && MMC && WL12XX_SDIO
48 default n
49 ---help---
50 This module adds support for the SDIO bus testing with the
51 TI wl12xx chipsets. You probably don't want this unless you are
52 testing a new hardware platform. Select this if you want to test the
53 SDIO bus which is connected to the wl12xx chip.
54
55config WL12XX_PLATFORM_DATA 45config WL12XX_PLATFORM_DATA
56 bool 46 bool
57 depends on WL12XX_SDIO != n || WL1251_SDIO != n 47 depends on WL12XX_SDIO != n || WL1251_SDIO != n
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 621b3483ca2c..fe67262ba19f 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -3,14 +3,11 @@ wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
3 3
4wl12xx_spi-objs = spi.o 4wl12xx_spi-objs = spi.o
5wl12xx_sdio-objs = sdio.o 5wl12xx_sdio-objs = sdio.o
6wl12xx_sdio_test-objs = sdio_test.o
7 6
8wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o 7wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
9obj-$(CONFIG_WL12XX) += wl12xx.o 8obj-$(CONFIG_WL12XX) += wl12xx.o
10obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o 9obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o
11obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o 10obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
12 11
13obj-$(CONFIG_WL12XX_SDIO_TEST) += wl12xx_sdio_test.o
14
15# small builtin driver bit 12# small builtin driver bit
16obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o 13obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index ca044a743191..7537c401a448 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -29,11 +29,12 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "wl12xx.h" 31#include "wl12xx.h"
32#include "debug.h"
32#include "wl12xx_80211.h" 33#include "wl12xx_80211.h"
33#include "reg.h" 34#include "reg.h"
34#include "ps.h" 35#include "ps.h"
35 36
36int wl1271_acx_wake_up_conditions(struct wl1271 *wl) 37int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif)
37{ 38{
38 struct acx_wake_up_condition *wake_up; 39 struct acx_wake_up_condition *wake_up;
39 int ret; 40 int ret;
@@ -46,7 +47,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
46 goto out; 47 goto out;
47 } 48 }
48 49
49 wake_up->role_id = wl->role_id; 50 wake_up->role_id = wlvif->role_id;
50 wake_up->wake_up_event = wl->conf.conn.wake_up_event; 51 wake_up->wake_up_event = wl->conf.conn.wake_up_event;
51 wake_up->listen_interval = wl->conf.conn.listen_interval; 52 wake_up->listen_interval = wl->conf.conn.listen_interval;
52 53
@@ -84,7 +85,8 @@ out:
84 return ret; 85 return ret;
85} 86}
86 87
87int wl1271_acx_tx_power(struct wl1271 *wl, int power) 88int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
89 int power)
88{ 90{
89 struct acx_current_tx_power *acx; 91 struct acx_current_tx_power *acx;
90 int ret; 92 int ret;
@@ -100,7 +102,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
100 goto out; 102 goto out;
101 } 103 }
102 104
103 acx->role_id = wl->role_id; 105 acx->role_id = wlvif->role_id;
104 acx->current_tx_power = power * 10; 106 acx->current_tx_power = power * 10;
105 107
106 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); 108 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
@@ -114,7 +116,7 @@ out:
114 return ret; 116 return ret;
115} 117}
116 118
117int wl1271_acx_feature_cfg(struct wl1271 *wl) 119int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
118{ 120{
119 struct acx_feature_config *feature; 121 struct acx_feature_config *feature;
120 int ret; 122 int ret;
@@ -128,7 +130,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl)
128 } 130 }
129 131
130 /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */ 132 /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
131 feature->role_id = wl->role_id; 133 feature->role_id = wlvif->role_id;
132 feature->data_flow_options = 0; 134 feature->data_flow_options = 0;
133 feature->options = 0; 135 feature->options = 0;
134 136
@@ -184,33 +186,8 @@ out:
184 return ret; 186 return ret;
185} 187}
186 188
187int wl1271_acx_pd_threshold(struct wl1271 *wl) 189int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
188{ 190 enum acx_slot_type slot_time)
189 struct acx_packet_detection *pd;
190 int ret;
191
192 wl1271_debug(DEBUG_ACX, "acx data pd threshold");
193
194 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
195 if (!pd) {
196 ret = -ENOMEM;
197 goto out;
198 }
199
200 pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
201
202 ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
203 if (ret < 0) {
204 wl1271_warning("failed to set pd threshold: %d", ret);
205 goto out;
206 }
207
208out:
209 kfree(pd);
210 return ret;
211}
212
213int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
214{ 191{
215 struct acx_slot *slot; 192 struct acx_slot *slot;
216 int ret; 193 int ret;
@@ -223,7 +200,7 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
223 goto out; 200 goto out;
224 } 201 }
225 202
226 slot->role_id = wl->role_id; 203 slot->role_id = wlvif->role_id;
227 slot->wone_index = STATION_WONE_INDEX; 204 slot->wone_index = STATION_WONE_INDEX;
228 slot->slot_time = slot_time; 205 slot->slot_time = slot_time;
229 206
@@ -238,8 +215,8 @@ out:
238 return ret; 215 return ret;
239} 216}
240 217
241int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable, 218int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
242 void *mc_list, u32 mc_list_len) 219 bool enable, void *mc_list, u32 mc_list_len)
243{ 220{
244 struct acx_dot11_grp_addr_tbl *acx; 221 struct acx_dot11_grp_addr_tbl *acx;
245 int ret; 222 int ret;
@@ -253,7 +230,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
253 } 230 }
254 231
255 /* MAC filtering */ 232 /* MAC filtering */
256 acx->role_id = wl->role_id; 233 acx->role_id = wlvif->role_id;
257 acx->enabled = enable; 234 acx->enabled = enable;
258 acx->num_groups = mc_list_len; 235 acx->num_groups = mc_list_len;
259 memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN); 236 memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
@@ -270,7 +247,8 @@ out:
270 return ret; 247 return ret;
271} 248}
272 249
273int wl1271_acx_service_period_timeout(struct wl1271 *wl) 250int wl1271_acx_service_period_timeout(struct wl1271 *wl,
251 struct wl12xx_vif *wlvif)
274{ 252{
275 struct acx_rx_timeout *rx_timeout; 253 struct acx_rx_timeout *rx_timeout;
276 int ret; 254 int ret;
@@ -283,7 +261,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
283 261
284 wl1271_debug(DEBUG_ACX, "acx service period timeout"); 262 wl1271_debug(DEBUG_ACX, "acx service period timeout");
285 263
286 rx_timeout->role_id = wl->role_id; 264 rx_timeout->role_id = wlvif->role_id;
287 rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout); 265 rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
288 rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout); 266 rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
289 267
@@ -300,7 +278,8 @@ out:
300 return ret; 278 return ret;
301} 279}
302 280
303int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold) 281int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
282 u32 rts_threshold)
304{ 283{
305 struct acx_rts_threshold *rts; 284 struct acx_rts_threshold *rts;
306 int ret; 285 int ret;
@@ -320,7 +299,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
320 goto out; 299 goto out;
321 } 300 }
322 301
323 rts->role_id = wl->role_id; 302 rts->role_id = wlvif->role_id;
324 rts->threshold = cpu_to_le16((u16)rts_threshold); 303 rts->threshold = cpu_to_le16((u16)rts_threshold);
325 304
326 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); 305 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
@@ -363,7 +342,8 @@ out:
363 return ret; 342 return ret;
364} 343}
365 344
366int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter) 345int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
346 bool enable_filter)
367{ 347{
368 struct acx_beacon_filter_option *beacon_filter = NULL; 348 struct acx_beacon_filter_option *beacon_filter = NULL;
369 int ret = 0; 349 int ret = 0;
@@ -380,7 +360,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
380 goto out; 360 goto out;
381 } 361 }
382 362
383 beacon_filter->role_id = wl->role_id; 363 beacon_filter->role_id = wlvif->role_id;
384 beacon_filter->enable = enable_filter; 364 beacon_filter->enable = enable_filter;
385 365
386 /* 366 /*
@@ -401,7 +381,8 @@ out:
401 return ret; 381 return ret;
402} 382}
403 383
404int wl1271_acx_beacon_filter_table(struct wl1271 *wl) 384int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
385 struct wl12xx_vif *wlvif)
405{ 386{
406 struct acx_beacon_filter_ie_table *ie_table; 387 struct acx_beacon_filter_ie_table *ie_table;
407 int i, idx = 0; 388 int i, idx = 0;
@@ -417,7 +398,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
417 } 398 }
418 399
419 /* configure default beacon pass-through rules */ 400 /* configure default beacon pass-through rules */
420 ie_table->role_id = wl->role_id; 401 ie_table->role_id = wlvif->role_id;
421 ie_table->num_ie = 0; 402 ie_table->num_ie = 0;
422 for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) { 403 for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
423 struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]); 404 struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
@@ -458,7 +439,8 @@ out:
458 439
459#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff 440#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff
460 441
461int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable) 442int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
443 bool enable)
462{ 444{
463 struct acx_conn_monit_params *acx; 445 struct acx_conn_monit_params *acx;
464 u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE; 446 u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
@@ -479,7 +461,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
479 timeout = wl->conf.conn.bss_lose_timeout; 461 timeout = wl->conf.conn.bss_lose_timeout;
480 } 462 }
481 463
482 acx->role_id = wl->role_id; 464 acx->role_id = wlvif->role_id;
483 acx->synch_fail_thold = cpu_to_le32(threshold); 465 acx->synch_fail_thold = cpu_to_le32(threshold);
484 acx->bss_lose_timeout = cpu_to_le32(timeout); 466 acx->bss_lose_timeout = cpu_to_le32(timeout);
485 467
@@ -582,7 +564,7 @@ out:
582 return ret; 564 return ret;
583} 565}
584 566
585int wl1271_acx_bcn_dtim_options(struct wl1271 *wl) 567int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif)
586{ 568{
587 struct acx_beacon_broadcast *bb; 569 struct acx_beacon_broadcast *bb;
588 int ret; 570 int ret;
@@ -595,7 +577,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
595 goto out; 577 goto out;
596 } 578 }
597 579
598 bb->role_id = wl->role_id; 580 bb->role_id = wlvif->role_id;
599 bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout); 581 bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
600 bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout); 582 bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
601 bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps; 583 bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
@@ -612,7 +594,7 @@ out:
612 return ret; 594 return ret;
613} 595}
614 596
615int wl1271_acx_aid(struct wl1271 *wl, u16 aid) 597int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid)
616{ 598{
617 struct acx_aid *acx_aid; 599 struct acx_aid *acx_aid;
618 int ret; 600 int ret;
@@ -625,7 +607,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
625 goto out; 607 goto out;
626 } 608 }
627 609
628 acx_aid->role_id = wl->role_id; 610 acx_aid->role_id = wlvif->role_id;
629 acx_aid->aid = cpu_to_le16(aid); 611 acx_aid->aid = cpu_to_le16(aid);
630 612
631 ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); 613 ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
@@ -668,7 +650,8 @@ out:
668 return ret; 650 return ret;
669} 651}
670 652
671int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble) 653int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
654 enum acx_preamble_type preamble)
672{ 655{
673 struct acx_preamble *acx; 656 struct acx_preamble *acx;
674 int ret; 657 int ret;
@@ -681,7 +664,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
681 goto out; 664 goto out;
682 } 665 }
683 666
684 acx->role_id = wl->role_id; 667 acx->role_id = wlvif->role_id;
685 acx->preamble = preamble; 668 acx->preamble = preamble;
686 669
687 ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx)); 670 ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
@@ -695,7 +678,7 @@ out:
695 return ret; 678 return ret;
696} 679}
697 680
698int wl1271_acx_cts_protect(struct wl1271 *wl, 681int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
699 enum acx_ctsprotect_type ctsprotect) 682 enum acx_ctsprotect_type ctsprotect)
700{ 683{
701 struct acx_ctsprotect *acx; 684 struct acx_ctsprotect *acx;
@@ -709,7 +692,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl,
709 goto out; 692 goto out;
710 } 693 }
711 694
712 acx->role_id = wl->role_id; 695 acx->role_id = wlvif->role_id;
713 acx->ctsprotect = ctsprotect; 696 acx->ctsprotect = ctsprotect;
714 697
715 ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx)); 698 ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
@@ -739,7 +722,7 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
739 return 0; 722 return 0;
740} 723}
741 724
742int wl1271_acx_sta_rate_policies(struct wl1271 *wl) 725int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
743{ 726{
744 struct acx_rate_policy *acx; 727 struct acx_rate_policy *acx;
745 struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf; 728 struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
@@ -755,11 +738,11 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
755 } 738 }
756 739
757 wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x", 740 wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
758 wl->basic_rate, wl->rate_set); 741 wlvif->basic_rate, wlvif->rate_set);
759 742
760 /* configure one basic rate class */ 743 /* configure one basic rate class */
761 acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE); 744 acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx);
762 acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate); 745 acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate);
763 acx->rate_policy.short_retry_limit = c->short_retry_limit; 746 acx->rate_policy.short_retry_limit = c->short_retry_limit;
764 acx->rate_policy.long_retry_limit = c->long_retry_limit; 747 acx->rate_policy.long_retry_limit = c->long_retry_limit;
765 acx->rate_policy.aflags = c->aflags; 748 acx->rate_policy.aflags = c->aflags;
@@ -771,8 +754,8 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
771 } 754 }
772 755
773 /* configure one AP supported rate class */ 756 /* configure one AP supported rate class */
774 acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE); 757 acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
775 acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set); 758 acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set);
776 acx->rate_policy.short_retry_limit = c->short_retry_limit; 759 acx->rate_policy.short_retry_limit = c->short_retry_limit;
777 acx->rate_policy.long_retry_limit = c->long_retry_limit; 760 acx->rate_policy.long_retry_limit = c->long_retry_limit;
778 acx->rate_policy.aflags = c->aflags; 761 acx->rate_policy.aflags = c->aflags;
@@ -788,7 +771,7 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
788 * (p2p packets should always go out with OFDM rates, even 771 * (p2p packets should always go out with OFDM rates, even
789 * if we are currently connected to 11b AP) 772 * if we are currently connected to 11b AP)
790 */ 773 */
791 acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE_P2P); 774 acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx);
792 acx->rate_policy.enabled_rates = 775 acx->rate_policy.enabled_rates =
793 cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P); 776 cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P);
794 acx->rate_policy.short_retry_limit = c->short_retry_limit; 777 acx->rate_policy.short_retry_limit = c->short_retry_limit;
@@ -839,8 +822,8 @@ out:
839 return ret; 822 return ret;
840} 823}
841 824
842int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, 825int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
843 u8 aifsn, u16 txop) 826 u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop)
844{ 827{
845 struct acx_ac_cfg *acx; 828 struct acx_ac_cfg *acx;
846 int ret = 0; 829 int ret = 0;
@@ -855,7 +838,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
855 goto out; 838 goto out;
856 } 839 }
857 840
858 acx->role_id = wl->role_id; 841 acx->role_id = wlvif->role_id;
859 acx->ac = ac; 842 acx->ac = ac;
860 acx->cw_min = cw_min; 843 acx->cw_min = cw_min;
861 acx->cw_max = cpu_to_le16(cw_max); 844 acx->cw_max = cpu_to_le16(cw_max);
@@ -873,7 +856,8 @@ out:
873 return ret; 856 return ret;
874} 857}
875 858
876int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, 859int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
860 u8 queue_id, u8 channel_type,
877 u8 tsid, u8 ps_scheme, u8 ack_policy, 861 u8 tsid, u8 ps_scheme, u8 ack_policy,
878 u32 apsd_conf0, u32 apsd_conf1) 862 u32 apsd_conf0, u32 apsd_conf1)
879{ 863{
@@ -889,7 +873,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
889 goto out; 873 goto out;
890 } 874 }
891 875
892 acx->role_id = wl->role_id; 876 acx->role_id = wlvif->role_id;
893 acx->queue_id = queue_id; 877 acx->queue_id = queue_id;
894 acx->channel_type = channel_type; 878 acx->channel_type = channel_type;
895 acx->tsid = tsid; 879 acx->tsid = tsid;
@@ -1098,7 +1082,8 @@ out:
1098 return ret; 1082 return ret;
1099} 1083}
1100 1084
1101int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable) 1085int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1086 bool enable)
1102{ 1087{
1103 struct wl1271_acx_bet_enable *acx = NULL; 1088 struct wl1271_acx_bet_enable *acx = NULL;
1104 int ret = 0; 1089 int ret = 0;
@@ -1114,7 +1099,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
1114 goto out; 1099 goto out;
1115 } 1100 }
1116 1101
1117 acx->role_id = wl->role_id; 1102 acx->role_id = wlvif->role_id;
1118 acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE; 1103 acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
1119 acx->max_consecutive = wl->conf.conn.bet_max_consecutive; 1104 acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
1120 1105
@@ -1129,7 +1114,8 @@ out:
1129 return ret; 1114 return ret;
1130} 1115}
1131 1116
1132int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address) 1117int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1118 u8 enable, __be32 address)
1133{ 1119{
1134 struct wl1271_acx_arp_filter *acx; 1120 struct wl1271_acx_arp_filter *acx;
1135 int ret; 1121 int ret;
@@ -1142,7 +1128,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
1142 goto out; 1128 goto out;
1143 } 1129 }
1144 1130
1145 acx->role_id = wl->role_id; 1131 acx->role_id = wlvif->role_id;
1146 acx->version = ACX_IPV4_VERSION; 1132 acx->version = ACX_IPV4_VERSION;
1147 acx->enable = enable; 1133 acx->enable = enable;
1148 1134
@@ -1189,7 +1175,8 @@ out:
1189 return ret; 1175 return ret;
1190} 1176}
1191 1177
1192int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable) 1178int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1179 bool enable)
1193{ 1180{
1194 struct wl1271_acx_keep_alive_mode *acx = NULL; 1181 struct wl1271_acx_keep_alive_mode *acx = NULL;
1195 int ret = 0; 1182 int ret = 0;
@@ -1202,7 +1189,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
1202 goto out; 1189 goto out;
1203 } 1190 }
1204 1191
1205 acx->role_id = wl->role_id; 1192 acx->role_id = wlvif->role_id;
1206 acx->enabled = enable; 1193 acx->enabled = enable;
1207 1194
1208 ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx)); 1195 ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
@@ -1216,7 +1203,8 @@ out:
1216 return ret; 1203 return ret;
1217} 1204}
1218 1205
1219int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid) 1206int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1207 u8 index, u8 tpl_valid)
1220{ 1208{
1221 struct wl1271_acx_keep_alive_config *acx = NULL; 1209 struct wl1271_acx_keep_alive_config *acx = NULL;
1222 int ret = 0; 1210 int ret = 0;
@@ -1229,7 +1217,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
1229 goto out; 1217 goto out;
1230 } 1218 }
1231 1219
1232 acx->role_id = wl->role_id; 1220 acx->role_id = wlvif->role_id;
1233 acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval); 1221 acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
1234 acx->index = index; 1222 acx->index = index;
1235 acx->tpl_validation = tpl_valid; 1223 acx->tpl_validation = tpl_valid;
@@ -1247,8 +1235,8 @@ out:
1247 return ret; 1235 return ret;
1248} 1236}
1249 1237
1250int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable, 1238int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1251 s16 thold, u8 hyst) 1239 bool enable, s16 thold, u8 hyst)
1252{ 1240{
1253 struct wl1271_acx_rssi_snr_trigger *acx = NULL; 1241 struct wl1271_acx_rssi_snr_trigger *acx = NULL;
1254 int ret = 0; 1242 int ret = 0;
@@ -1261,9 +1249,9 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
1261 goto out; 1249 goto out;
1262 } 1250 }
1263 1251
1264 wl->last_rssi_event = -1; 1252 wlvif->last_rssi_event = -1;
1265 1253
1266 acx->role_id = wl->role_id; 1254 acx->role_id = wlvif->role_id;
1267 acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing); 1255 acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
1268 acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON; 1256 acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
1269 acx->type = WL1271_ACX_TRIG_TYPE_EDGE; 1257 acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
@@ -1288,7 +1276,8 @@ out:
1288 return ret; 1276 return ret;
1289} 1277}
1290 1278
1291int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl) 1279int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
1280 struct wl12xx_vif *wlvif)
1292{ 1281{
1293 struct wl1271_acx_rssi_snr_avg_weights *acx = NULL; 1282 struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
1294 struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger; 1283 struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
@@ -1302,7 +1291,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
1302 goto out; 1291 goto out;
1303 } 1292 }
1304 1293
1305 acx->role_id = wl->role_id; 1294 acx->role_id = wlvif->role_id;
1306 acx->rssi_beacon = c->avg_weight_rssi_beacon; 1295 acx->rssi_beacon = c->avg_weight_rssi_beacon;
1307 acx->rssi_data = c->avg_weight_rssi_data; 1296 acx->rssi_data = c->avg_weight_rssi_data;
1308 acx->snr_beacon = c->avg_weight_snr_beacon; 1297 acx->snr_beacon = c->avg_weight_snr_beacon;
@@ -1367,6 +1356,7 @@ out:
1367} 1356}
1368 1357
1369int wl1271_acx_set_ht_information(struct wl1271 *wl, 1358int wl1271_acx_set_ht_information(struct wl1271 *wl,
1359 struct wl12xx_vif *wlvif,
1370 u16 ht_operation_mode) 1360 u16 ht_operation_mode)
1371{ 1361{
1372 struct wl1271_acx_ht_information *acx; 1362 struct wl1271_acx_ht_information *acx;
@@ -1380,7 +1370,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
1380 goto out; 1370 goto out;
1381 } 1371 }
1382 1372
1383 acx->role_id = wl->role_id; 1373 acx->role_id = wlvif->role_id;
1384 acx->ht_protection = 1374 acx->ht_protection =
1385 (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); 1375 (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
1386 acx->rifs_mode = 0; 1376 acx->rifs_mode = 0;
@@ -1402,7 +1392,8 @@ out:
1402} 1392}
1403 1393
1404/* Configure BA session initiator/receiver parameters setting in the FW. */ 1394/* Configure BA session initiator/receiver parameters setting in the FW. */
1405int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl) 1395int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
1396 struct wl12xx_vif *wlvif)
1406{ 1397{
1407 struct wl1271_acx_ba_initiator_policy *acx; 1398 struct wl1271_acx_ba_initiator_policy *acx;
1408 int ret; 1399 int ret;
@@ -1416,7 +1407,7 @@ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
1416 } 1407 }
1417 1408
1418 /* set for the current role */ 1409 /* set for the current role */
1419 acx->role_id = wl->role_id; 1410 acx->role_id = wlvif->role_id;
1420 acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap; 1411 acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
1421 acx->win_size = wl->conf.ht.tx_ba_win_size; 1412 acx->win_size = wl->conf.ht.tx_ba_win_size;
1422 acx->inactivity_timeout = wl->conf.ht.inactivity_timeout; 1413 acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
@@ -1494,7 +1485,8 @@ out:
1494 return ret; 1485 return ret;
1495} 1486}
1496 1487
1497int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable) 1488int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1489 bool enable)
1498{ 1490{
1499 struct wl1271_acx_ps_rx_streaming *rx_streaming; 1491 struct wl1271_acx_ps_rx_streaming *rx_streaming;
1500 u32 conf_queues, enable_queues; 1492 u32 conf_queues, enable_queues;
@@ -1523,7 +1515,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
1523 if (!(conf_queues & BIT(i))) 1515 if (!(conf_queues & BIT(i)))
1524 continue; 1516 continue;
1525 1517
1526 rx_streaming->role_id = wl->role_id; 1518 rx_streaming->role_id = wlvif->role_id;
1527 rx_streaming->tid = i; 1519 rx_streaming->tid = i;
1528 rx_streaming->enable = enable_queues & BIT(i); 1520 rx_streaming->enable = enable_queues & BIT(i);
1529 rx_streaming->period = wl->conf.rx_streaming.interval; 1521 rx_streaming->period = wl->conf.rx_streaming.interval;
@@ -1542,7 +1534,7 @@ out:
1542 return ret; 1534 return ret;
1543} 1535}
1544 1536
1545int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl) 1537int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1546{ 1538{
1547 struct wl1271_acx_ap_max_tx_retry *acx = NULL; 1539 struct wl1271_acx_ap_max_tx_retry *acx = NULL;
1548 int ret; 1540 int ret;
@@ -1553,7 +1545,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
1553 if (!acx) 1545 if (!acx)
1554 return -ENOMEM; 1546 return -ENOMEM;
1555 1547
1556 acx->role_id = wl->role_id; 1548 acx->role_id = wlvif->role_id;
1557 acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries); 1549 acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
1558 1550
1559 ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx)); 1551 ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
@@ -1567,7 +1559,7 @@ out:
1567 return ret; 1559 return ret;
1568} 1560}
1569 1561
1570int wl1271_acx_config_ps(struct wl1271 *wl) 1562int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1571{ 1563{
1572 struct wl1271_acx_config_ps *config_ps; 1564 struct wl1271_acx_config_ps *config_ps;
1573 int ret; 1565 int ret;
@@ -1582,7 +1574,7 @@ int wl1271_acx_config_ps(struct wl1271 *wl)
1582 1574
1583 config_ps->exit_retries = wl->conf.conn.psm_exit_retries; 1575 config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
1584 config_ps->enter_retries = wl->conf.conn.psm_entry_retries; 1576 config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
1585 config_ps->null_data_rate = cpu_to_le32(wl->basic_rate); 1577 config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate);
1586 1578
1587 ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps, 1579 ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
1588 sizeof(*config_ps)); 1580 sizeof(*config_ps));
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index e3f93b4b3429..69892b40c2df 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -171,13 +171,6 @@ struct acx_rx_msdu_lifetime {
171 __le32 lifetime; 171 __le32 lifetime;
172} __packed; 172} __packed;
173 173
174struct acx_packet_detection {
175 struct acx_header header;
176
177 __le32 threshold;
178} __packed;
179
180
181enum acx_slot_type { 174enum acx_slot_type {
182 SLOT_TIME_LONG = 0, 175 SLOT_TIME_LONG = 0,
183 SLOT_TIME_SHORT = 1, 176 SLOT_TIME_SHORT = 1,
@@ -654,11 +647,6 @@ struct acx_rate_class {
654 u8 reserved; 647 u8 reserved;
655}; 648};
656 649
657#define ACX_TX_BASIC_RATE 0
658#define ACX_TX_AP_FULL_RATE 1
659#define ACX_TX_BASIC_RATE_P2P 2
660#define ACX_TX_AP_MODE_MGMT_RATE 4
661#define ACX_TX_AP_MODE_BCST_RATE 5
662struct acx_rate_policy { 650struct acx_rate_policy {
663 struct acx_header header; 651 struct acx_header header;
664 652
@@ -1234,39 +1222,48 @@ enum {
1234}; 1222};
1235 1223
1236 1224
1237int wl1271_acx_wake_up_conditions(struct wl1271 *wl); 1225int wl1271_acx_wake_up_conditions(struct wl1271 *wl,
1226 struct wl12xx_vif *wlvif);
1238int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth); 1227int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
1239int wl1271_acx_tx_power(struct wl1271 *wl, int power); 1228int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1240int wl1271_acx_feature_cfg(struct wl1271 *wl); 1229 int power);
1230int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif);
1241int wl1271_acx_mem_map(struct wl1271 *wl, 1231int wl1271_acx_mem_map(struct wl1271 *wl,
1242 struct acx_header *mem_map, size_t len); 1232 struct acx_header *mem_map, size_t len);
1243int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl); 1233int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
1244int wl1271_acx_pd_threshold(struct wl1271 *wl); 1234int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1245int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time); 1235 enum acx_slot_type slot_time);
1246int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable, 1236int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1247 void *mc_list, u32 mc_list_len); 1237 bool enable, void *mc_list, u32 mc_list_len);
1248int wl1271_acx_service_period_timeout(struct wl1271 *wl); 1238int wl1271_acx_service_period_timeout(struct wl1271 *wl,
1249int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold); 1239 struct wl12xx_vif *wlvif);
1240int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1241 u32 rts_threshold);
1250int wl1271_acx_dco_itrim_params(struct wl1271 *wl); 1242int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
1251int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter); 1243int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1252int wl1271_acx_beacon_filter_table(struct wl1271 *wl); 1244 bool enable_filter);
1253int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable); 1245int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
1246 struct wl12xx_vif *wlvif);
1247int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1248 bool enable);
1254int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable); 1249int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
1255int wl12xx_acx_sg_cfg(struct wl1271 *wl); 1250int wl12xx_acx_sg_cfg(struct wl1271 *wl);
1256int wl1271_acx_cca_threshold(struct wl1271 *wl); 1251int wl1271_acx_cca_threshold(struct wl1271 *wl);
1257int wl1271_acx_bcn_dtim_options(struct wl1271 *wl); 1252int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif);
1258int wl1271_acx_aid(struct wl1271 *wl, u16 aid); 1253int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid);
1259int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask); 1254int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
1260int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble); 1255int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1261int wl1271_acx_cts_protect(struct wl1271 *wl, 1256 enum acx_preamble_type preamble);
1257int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1262 enum acx_ctsprotect_type ctsprotect); 1258 enum acx_ctsprotect_type ctsprotect);
1263int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); 1259int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
1264int wl1271_acx_sta_rate_policies(struct wl1271 *wl); 1260int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
1265int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, 1261int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
1266 u8 idx); 1262 u8 idx);
1267int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, 1263int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1268 u8 aifsn, u16 txop); 1264 u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop);
1269int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, 1265int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1266 u8 queue_id, u8 channel_type,
1270 u8 tsid, u8 ps_scheme, u8 ack_policy, 1267 u8 tsid, u8 ps_scheme, u8 ack_policy,
1271 u32 apsd_conf0, u32 apsd_conf1); 1268 u32 apsd_conf0, u32 apsd_conf1);
1272int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold); 1269int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
@@ -1276,26 +1273,34 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl);
1276int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap); 1273int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
1277int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1274int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
1278int wl1271_acx_smart_reflex(struct wl1271 *wl); 1275int wl1271_acx_smart_reflex(struct wl1271 *wl);
1279int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable); 1276int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1280int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address); 1277 bool enable);
1278int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1279 u8 enable, __be32 address);
1281int wl1271_acx_pm_config(struct wl1271 *wl); 1280int wl1271_acx_pm_config(struct wl1271 *wl);
1282int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable); 1281int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *vif,
1283int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid); 1282 bool enable);
1284int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable, 1283int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1285 s16 thold, u8 hyst); 1284 u8 index, u8 tpl_valid);
1286int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl); 1285int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1286 bool enable, s16 thold, u8 hyst);
1287int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
1288 struct wl12xx_vif *wlvif);
1287int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, 1289int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1288 struct ieee80211_sta_ht_cap *ht_cap, 1290 struct ieee80211_sta_ht_cap *ht_cap,
1289 bool allow_ht_operation, u8 hlid); 1291 bool allow_ht_operation, u8 hlid);
1290int wl1271_acx_set_ht_information(struct wl1271 *wl, 1292int wl1271_acx_set_ht_information(struct wl1271 *wl,
1293 struct wl12xx_vif *wlvif,
1291 u16 ht_operation_mode); 1294 u16 ht_operation_mode);
1292int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl); 1295int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
1296 struct wl12xx_vif *wlvif);
1293int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, 1297int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
1294 u16 ssn, bool enable, u8 peer_hlid); 1298 u16 ssn, bool enable, u8 peer_hlid);
1295int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime); 1299int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
1296int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable); 1300int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1297int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl); 1301 bool enable);
1298int wl1271_acx_config_ps(struct wl1271 *wl); 1302int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
1303int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
1299int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr); 1304int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
1300int wl1271_acx_fm_coex(struct wl1271 *wl); 1305int wl1271_acx_fm_coex(struct wl1271 *wl);
1301int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl); 1306int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index d4e628db76b0..8f9cf5a816ea 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -23,7 +23,9 @@
23 23
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/wl12xx.h> 25#include <linux/wl12xx.h>
26#include <linux/export.h>
26 27
28#include "debug.h"
27#include "acx.h" 29#include "acx.h"
28#include "reg.h" 30#include "reg.h"
29#include "boot.h" 31#include "boot.h"
@@ -346,6 +348,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
346 nvs_ptr += 3; 348 nvs_ptr += 3;
347 349
348 for (i = 0; i < burst_len; i++) { 350 for (i = 0; i < burst_len; i++) {
351 if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
352 goto out_badnvs;
353
349 val = (nvs_ptr[0] | (nvs_ptr[1] << 8) 354 val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
350 | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); 355 | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
351 356
@@ -357,6 +362,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
357 nvs_ptr += 4; 362 nvs_ptr += 4;
358 dest_addr += 4; 363 dest_addr += 4;
359 } 364 }
365
366 if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
367 goto out_badnvs;
360 } 368 }
361 369
362 /* 370 /*
@@ -368,6 +376,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
368 */ 376 */
369 nvs_ptr = (u8 *)wl->nvs + 377 nvs_ptr = (u8 *)wl->nvs +
370 ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4); 378 ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
379
380 if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
381 goto out_badnvs;
382
371 nvs_len -= nvs_ptr - (u8 *)wl->nvs; 383 nvs_len -= nvs_ptr - (u8 *)wl->nvs;
372 384
373 /* Now we must set the partition correctly */ 385 /* Now we must set the partition correctly */
@@ -383,6 +395,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
383 395
384 kfree(nvs_aligned); 396 kfree(nvs_aligned);
385 return 0; 397 return 0;
398
399out_badnvs:
400 wl1271_error("nvs data is malformed");
401 return -EILSEQ;
386} 402}
387 403
388static void wl1271_boot_enable_interrupts(struct wl1271 *wl) 404static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index a52299e548fa..e0d217979485 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -29,6 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "wl12xx.h" 31#include "wl12xx.h"
32#include "debug.h"
32#include "reg.h" 33#include "reg.h"
33#include "io.h" 34#include "io.h"
34#include "acx.h" 35#include "acx.h"
@@ -120,6 +121,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
120 if (!wl->nvs) 121 if (!wl->nvs)
121 return -ENODEV; 122 return -ENODEV;
122 123
124 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
125 wl1271_warning("FEM index from INI out of bounds");
126 return -EINVAL;
127 }
128
123 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); 129 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
124 if (!gen_parms) 130 if (!gen_parms)
125 return -ENOMEM; 131 return -ENOMEM;
@@ -143,6 +149,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
143 gp->tx_bip_fem_manufacturer = 149 gp->tx_bip_fem_manufacturer =
144 gen_parms->general_params.tx_bip_fem_manufacturer; 150 gen_parms->general_params.tx_bip_fem_manufacturer;
145 151
152 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
153 wl1271_warning("FEM index from FW out of bounds");
154 ret = -EINVAL;
155 goto out;
156 }
157
146 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", 158 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
147 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer); 159 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
148 160
@@ -162,6 +174,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
162 if (!wl->nvs) 174 if (!wl->nvs)
163 return -ENODEV; 175 return -ENODEV;
164 176
177 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
178 wl1271_warning("FEM index from ini out of bounds");
179 return -EINVAL;
180 }
181
165 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); 182 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
166 if (!gen_parms) 183 if (!gen_parms)
167 return -ENOMEM; 184 return -ENOMEM;
@@ -186,6 +203,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
186 gp->tx_bip_fem_manufacturer = 203 gp->tx_bip_fem_manufacturer =
187 gen_parms->general_params.tx_bip_fem_manufacturer; 204 gen_parms->general_params.tx_bip_fem_manufacturer;
188 205
206 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
207 wl1271_warning("FEM index from FW out of bounds");
208 ret = -EINVAL;
209 goto out;
210 }
211
189 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", 212 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
190 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer); 213 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
191 214
@@ -358,7 +381,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
358 return 0; 381 return 0;
359} 382}
360 383
361int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id) 384int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
385 u8 *role_id)
362{ 386{
363 struct wl12xx_cmd_role_enable *cmd; 387 struct wl12xx_cmd_role_enable *cmd;
364 int ret; 388 int ret;
@@ -381,7 +405,7 @@ int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
381 goto out_free; 405 goto out_free;
382 } 406 }
383 407
384 memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN); 408 memcpy(cmd->mac_address, addr, ETH_ALEN);
385 cmd->role_type = role_type; 409 cmd->role_type = role_type;
386 410
387 ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0); 411 ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
@@ -433,37 +457,41 @@ out:
433 return ret; 457 return ret;
434} 458}
435 459
436static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid) 460int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
437{ 461{
438 u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS); 462 u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
439 if (link >= WL12XX_MAX_LINKS) 463 if (link >= WL12XX_MAX_LINKS)
440 return -EBUSY; 464 return -EBUSY;
441 465
442 __set_bit(link, wl->links_map); 466 __set_bit(link, wl->links_map);
467 __set_bit(link, wlvif->links_map);
443 *hlid = link; 468 *hlid = link;
444 return 0; 469 return 0;
445} 470}
446 471
447static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid) 472void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
448{ 473{
449 if (*hlid == WL12XX_INVALID_LINK_ID) 474 if (*hlid == WL12XX_INVALID_LINK_ID)
450 return; 475 return;
451 476
452 __clear_bit(*hlid, wl->links_map); 477 __clear_bit(*hlid, wl->links_map);
478 __clear_bit(*hlid, wlvif->links_map);
453 *hlid = WL12XX_INVALID_LINK_ID; 479 *hlid = WL12XX_INVALID_LINK_ID;
454} 480}
455 481
456static int wl12xx_get_new_session_id(struct wl1271 *wl) 482static int wl12xx_get_new_session_id(struct wl1271 *wl,
483 struct wl12xx_vif *wlvif)
457{ 484{
458 if (wl->session_counter >= SESSION_COUNTER_MAX) 485 if (wlvif->session_counter >= SESSION_COUNTER_MAX)
459 wl->session_counter = 0; 486 wlvif->session_counter = 0;
460 487
461 wl->session_counter++; 488 wlvif->session_counter++;
462 489
463 return wl->session_counter; 490 return wlvif->session_counter;
464} 491}
465 492
466int wl12xx_cmd_role_start_dev(struct wl1271 *wl) 493static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
494 struct wl12xx_vif *wlvif)
467{ 495{
468 struct wl12xx_cmd_role_start *cmd; 496 struct wl12xx_cmd_role_start *cmd;
469 int ret; 497 int ret;
@@ -474,20 +502,20 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
474 goto out; 502 goto out;
475 } 503 }
476 504
477 wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id); 505 wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
478 506
479 cmd->role_id = wl->dev_role_id; 507 cmd->role_id = wlvif->dev_role_id;
480 if (wl->band == IEEE80211_BAND_5GHZ) 508 if (wlvif->band == IEEE80211_BAND_5GHZ)
481 cmd->band = WL12XX_BAND_5GHZ; 509 cmd->band = WL12XX_BAND_5GHZ;
482 cmd->channel = wl->channel; 510 cmd->channel = wlvif->channel;
483 511
484 if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) { 512 if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
485 ret = wl12xx_allocate_link(wl, &wl->dev_hlid); 513 ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
486 if (ret) 514 if (ret)
487 goto out_free; 515 goto out_free;
488 } 516 }
489 cmd->device.hlid = wl->dev_hlid; 517 cmd->device.hlid = wlvif->dev_hlid;
490 cmd->device.session = wl->session_counter; 518 cmd->device.session = wlvif->session_counter;
491 519
492 wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d", 520 wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
493 cmd->role_id, cmd->device.hlid, cmd->device.session); 521 cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -502,9 +530,7 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
502 530
503err_hlid: 531err_hlid:
504 /* clear links on error */ 532 /* clear links on error */
505 __clear_bit(wl->dev_hlid, wl->links_map); 533 wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
506 wl->dev_hlid = WL12XX_INVALID_LINK_ID;
507
508 534
509out_free: 535out_free:
510 kfree(cmd); 536 kfree(cmd);
@@ -513,12 +539,13 @@ out:
513 return ret; 539 return ret;
514} 540}
515 541
516int wl12xx_cmd_role_stop_dev(struct wl1271 *wl) 542static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
543 struct wl12xx_vif *wlvif)
517{ 544{
518 struct wl12xx_cmd_role_stop *cmd; 545 struct wl12xx_cmd_role_stop *cmd;
519 int ret; 546 int ret;
520 547
521 if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID)) 548 if (WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID))
522 return -EINVAL; 549 return -EINVAL;
523 550
524 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 551 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -529,7 +556,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
529 556
530 wl1271_debug(DEBUG_CMD, "cmd role stop dev"); 557 wl1271_debug(DEBUG_CMD, "cmd role stop dev");
531 558
532 cmd->role_id = wl->dev_role_id; 559 cmd->role_id = wlvif->dev_role_id;
533 cmd->disc_type = DISCONNECT_IMMEDIATE; 560 cmd->disc_type = DISCONNECT_IMMEDIATE;
534 cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED); 561 cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
535 562
@@ -545,7 +572,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
545 goto out_free; 572 goto out_free;
546 } 573 }
547 574
548 wl12xx_free_link(wl, &wl->dev_hlid); 575 wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
549 576
550out_free: 577out_free:
551 kfree(cmd); 578 kfree(cmd);
@@ -554,8 +581,9 @@ out:
554 return ret; 581 return ret;
555} 582}
556 583
557int wl12xx_cmd_role_start_sta(struct wl1271 *wl) 584int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
558{ 585{
586 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
559 struct wl12xx_cmd_role_start *cmd; 587 struct wl12xx_cmd_role_start *cmd;
560 int ret; 588 int ret;
561 589
@@ -565,33 +593,33 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
565 goto out; 593 goto out;
566 } 594 }
567 595
568 wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id); 596 wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
569 597
570 cmd->role_id = wl->role_id; 598 cmd->role_id = wlvif->role_id;
571 if (wl->band == IEEE80211_BAND_5GHZ) 599 if (wlvif->band == IEEE80211_BAND_5GHZ)
572 cmd->band = WL12XX_BAND_5GHZ; 600 cmd->band = WL12XX_BAND_5GHZ;
573 cmd->channel = wl->channel; 601 cmd->channel = wlvif->channel;
574 cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set); 602 cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
575 cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int); 603 cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
576 cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY; 604 cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
577 cmd->sta.ssid_len = wl->ssid_len; 605 cmd->sta.ssid_len = wlvif->ssid_len;
578 memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len); 606 memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
579 memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN); 607 memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
580 cmd->sta.local_rates = cpu_to_le32(wl->rate_set); 608 cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
581 609
582 if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) { 610 if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
583 ret = wl12xx_allocate_link(wl, &wl->sta_hlid); 611 ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
584 if (ret) 612 if (ret)
585 goto out_free; 613 goto out_free;
586 } 614 }
587 cmd->sta.hlid = wl->sta_hlid; 615 cmd->sta.hlid = wlvif->sta.hlid;
588 cmd->sta.session = wl12xx_get_new_session_id(wl); 616 cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif);
589 cmd->sta.remote_rates = cpu_to_le32(wl->rate_set); 617 cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set);
590 618
591 wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d " 619 wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
592 "basic_rate_set: 0x%x, remote_rates: 0x%x", 620 "basic_rate_set: 0x%x, remote_rates: 0x%x",
593 wl->role_id, cmd->sta.hlid, cmd->sta.session, 621 wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
594 wl->basic_rate_set, wl->rate_set); 622 wlvif->basic_rate_set, wlvif->rate_set);
595 623
596 ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0); 624 ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
597 if (ret < 0) { 625 if (ret < 0) {
@@ -603,7 +631,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
603 631
604err_hlid: 632err_hlid:
605 /* clear links on error. */ 633 /* clear links on error. */
606 wl12xx_free_link(wl, &wl->sta_hlid); 634 wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
607 635
608out_free: 636out_free:
609 kfree(cmd); 637 kfree(cmd);
@@ -613,12 +641,12 @@ out:
613} 641}
614 642
615/* use this function to stop ibss as well */ 643/* use this function to stop ibss as well */
616int wl12xx_cmd_role_stop_sta(struct wl1271 *wl) 644int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
617{ 645{
618 struct wl12xx_cmd_role_stop *cmd; 646 struct wl12xx_cmd_role_stop *cmd;
619 int ret; 647 int ret;
620 648
621 if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID)) 649 if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
622 return -EINVAL; 650 return -EINVAL;
623 651
624 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 652 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -627,9 +655,9 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
627 goto out; 655 goto out;
628 } 656 }
629 657
630 wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id); 658 wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id);
631 659
632 cmd->role_id = wl->role_id; 660 cmd->role_id = wlvif->role_id;
633 cmd->disc_type = DISCONNECT_IMMEDIATE; 661 cmd->disc_type = DISCONNECT_IMMEDIATE;
634 cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED); 662 cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
635 663
@@ -639,7 +667,7 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
639 goto out_free; 667 goto out_free;
640 } 668 }
641 669
642 wl12xx_free_link(wl, &wl->sta_hlid); 670 wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
643 671
644out_free: 672out_free:
645 kfree(cmd); 673 kfree(cmd);
@@ -648,16 +676,17 @@ out:
648 return ret; 676 return ret;
649} 677}
650 678
651int wl12xx_cmd_role_start_ap(struct wl1271 *wl) 679int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
652{ 680{
653 struct wl12xx_cmd_role_start *cmd; 681 struct wl12xx_cmd_role_start *cmd;
654 struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf; 682 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
683 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
655 int ret; 684 int ret;
656 685
657 wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id); 686 wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
658 687
659 /* trying to use hidden SSID with an old hostapd version */ 688 /* trying to use hidden SSID with an old hostapd version */
660 if (wl->ssid_len == 0 && !bss_conf->hidden_ssid) { 689 if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) {
661 wl1271_error("got a null SSID from beacon/bss"); 690 wl1271_error("got a null SSID from beacon/bss");
662 ret = -EINVAL; 691 ret = -EINVAL;
663 goto out; 692 goto out;
@@ -669,30 +698,30 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
669 goto out; 698 goto out;
670 } 699 }
671 700
672 ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid); 701 ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid);
673 if (ret < 0) 702 if (ret < 0)
674 goto out_free; 703 goto out_free;
675 704
676 ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid); 705 ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid);
677 if (ret < 0) 706 if (ret < 0)
678 goto out_free_global; 707 goto out_free_global;
679 708
680 cmd->role_id = wl->role_id; 709 cmd->role_id = wlvif->role_id;
681 cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period); 710 cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
682 cmd->ap.bss_index = WL1271_AP_BSS_INDEX; 711 cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
683 cmd->ap.global_hlid = wl->ap_global_hlid; 712 cmd->ap.global_hlid = wlvif->ap.global_hlid;
684 cmd->ap.broadcast_hlid = wl->ap_bcast_hlid; 713 cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
685 cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set); 714 cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
686 cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int); 715 cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
687 cmd->ap.dtim_interval = bss_conf->dtim_period; 716 cmd->ap.dtim_interval = bss_conf->dtim_period;
688 cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP; 717 cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
689 cmd->channel = wl->channel; 718 cmd->channel = wlvif->channel;
690 719
691 if (!bss_conf->hidden_ssid) { 720 if (!bss_conf->hidden_ssid) {
692 /* take the SSID from the beacon for backward compatibility */ 721 /* take the SSID from the beacon for backward compatibility */
693 cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC; 722 cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
694 cmd->ap.ssid_len = wl->ssid_len; 723 cmd->ap.ssid_len = wlvif->ssid_len;
695 memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len); 724 memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len);
696 } else { 725 } else {
697 cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN; 726 cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
698 cmd->ap.ssid_len = bss_conf->ssid_len; 727 cmd->ap.ssid_len = bss_conf->ssid_len;
@@ -701,7 +730,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
701 730
702 cmd->ap.local_rates = cpu_to_le32(0xffffffff); 731 cmd->ap.local_rates = cpu_to_le32(0xffffffff);
703 732
704 switch (wl->band) { 733 switch (wlvif->band) {
705 case IEEE80211_BAND_2GHZ: 734 case IEEE80211_BAND_2GHZ:
706 cmd->band = RADIO_BAND_2_4GHZ; 735 cmd->band = RADIO_BAND_2_4GHZ;
707 break; 736 break;
@@ -709,7 +738,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
709 cmd->band = RADIO_BAND_5GHZ; 738 cmd->band = RADIO_BAND_5GHZ;
710 break; 739 break;
711 default: 740 default:
712 wl1271_warning("ap start - unknown band: %d", (int)wl->band); 741 wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
713 cmd->band = RADIO_BAND_2_4GHZ; 742 cmd->band = RADIO_BAND_2_4GHZ;
714 break; 743 break;
715 } 744 }
@@ -723,10 +752,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
723 goto out_free; 752 goto out_free;
724 753
725out_free_bcast: 754out_free_bcast:
726 wl12xx_free_link(wl, &wl->ap_bcast_hlid); 755 wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
727 756
728out_free_global: 757out_free_global:
729 wl12xx_free_link(wl, &wl->ap_global_hlid); 758 wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
730 759
731out_free: 760out_free:
732 kfree(cmd); 761 kfree(cmd);
@@ -735,7 +764,7 @@ out:
735 return ret; 764 return ret;
736} 765}
737 766
738int wl12xx_cmd_role_stop_ap(struct wl1271 *wl) 767int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
739{ 768{
740 struct wl12xx_cmd_role_stop *cmd; 769 struct wl12xx_cmd_role_stop *cmd;
741 int ret; 770 int ret;
@@ -746,9 +775,9 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
746 goto out; 775 goto out;
747 } 776 }
748 777
749 wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id); 778 wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id);
750 779
751 cmd->role_id = wl->role_id; 780 cmd->role_id = wlvif->role_id;
752 781
753 ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0); 782 ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
754 if (ret < 0) { 783 if (ret < 0) {
@@ -756,8 +785,8 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
756 goto out_free; 785 goto out_free;
757 } 786 }
758 787
759 wl12xx_free_link(wl, &wl->ap_bcast_hlid); 788 wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
760 wl12xx_free_link(wl, &wl->ap_global_hlid); 789 wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
761 790
762out_free: 791out_free:
763 kfree(cmd); 792 kfree(cmd);
@@ -766,10 +795,11 @@ out:
766 return ret; 795 return ret;
767} 796}
768 797
769int wl12xx_cmd_role_start_ibss(struct wl1271 *wl) 798int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
770{ 799{
800 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
771 struct wl12xx_cmd_role_start *cmd; 801 struct wl12xx_cmd_role_start *cmd;
772 struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf; 802 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
773 int ret; 803 int ret;
774 804
775 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 805 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -778,35 +808,36 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
778 goto out; 808 goto out;
779 } 809 }
780 810
781 wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id); 811 wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
782 812
783 cmd->role_id = wl->role_id; 813 cmd->role_id = wlvif->role_id;
784 if (wl->band == IEEE80211_BAND_5GHZ) 814 if (wlvif->band == IEEE80211_BAND_5GHZ)
785 cmd->band = WL12XX_BAND_5GHZ; 815 cmd->band = WL12XX_BAND_5GHZ;
786 cmd->channel = wl->channel; 816 cmd->channel = wlvif->channel;
787 cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set); 817 cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
788 cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int); 818 cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
789 cmd->ibss.dtim_interval = bss_conf->dtim_period; 819 cmd->ibss.dtim_interval = bss_conf->dtim_period;
790 cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY; 820 cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
791 cmd->ibss.ssid_len = wl->ssid_len; 821 cmd->ibss.ssid_len = wlvif->ssid_len;
792 memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len); 822 memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len);
793 memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN); 823 memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN);
794 cmd->sta.local_rates = cpu_to_le32(wl->rate_set); 824 cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
795 825
796 if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) { 826 if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
797 ret = wl12xx_allocate_link(wl, &wl->sta_hlid); 827 ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
798 if (ret) 828 if (ret)
799 goto out_free; 829 goto out_free;
800 } 830 }
801 cmd->ibss.hlid = wl->sta_hlid; 831 cmd->ibss.hlid = wlvif->sta.hlid;
802 cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set); 832 cmd->ibss.remote_rates = cpu_to_le32(wlvif->rate_set);
803 833
804 wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d " 834 wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
805 "basic_rate_set: 0x%x, remote_rates: 0x%x", 835 "basic_rate_set: 0x%x, remote_rates: 0x%x",
806 wl->role_id, cmd->sta.hlid, cmd->sta.session, 836 wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
807 wl->basic_rate_set, wl->rate_set); 837 wlvif->basic_rate_set, wlvif->rate_set);
808 838
809 wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid); 839 wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM",
840 vif->bss_conf.bssid);
810 841
811 ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0); 842 ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
812 if (ret < 0) { 843 if (ret < 0) {
@@ -818,7 +849,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
818 849
819err_hlid: 850err_hlid:
820 /* clear links on error. */ 851 /* clear links on error. */
821 wl12xx_free_link(wl, &wl->sta_hlid); 852 wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
822 853
823out_free: 854out_free:
824 kfree(cmd); 855 kfree(cmd);
@@ -962,7 +993,8 @@ out:
962 return ret; 993 return ret;
963} 994}
964 995
965int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode) 996int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
997 u8 ps_mode)
966{ 998{
967 struct wl1271_cmd_ps_params *ps_params = NULL; 999 struct wl1271_cmd_ps_params *ps_params = NULL;
968 int ret = 0; 1000 int ret = 0;
@@ -975,7 +1007,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
975 goto out; 1007 goto out;
976 } 1008 }
977 1009
978 ps_params->role_id = wl->role_id; 1010 ps_params->role_id = wlvif->role_id;
979 ps_params->ps_mode = ps_mode; 1011 ps_params->ps_mode = ps_mode;
980 1012
981 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, 1013 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -1030,7 +1062,7 @@ out:
1030 return ret; 1062 return ret;
1031} 1063}
1032 1064
1033int wl1271_cmd_build_null_data(struct wl1271 *wl) 1065int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1034{ 1066{
1035 struct sk_buff *skb = NULL; 1067 struct sk_buff *skb = NULL;
1036 int size; 1068 int size;
@@ -1038,11 +1070,12 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
1038 int ret = -ENOMEM; 1070 int ret = -ENOMEM;
1039 1071
1040 1072
1041 if (wl->bss_type == BSS_TYPE_IBSS) { 1073 if (wlvif->bss_type == BSS_TYPE_IBSS) {
1042 size = sizeof(struct wl12xx_null_data_template); 1074 size = sizeof(struct wl12xx_null_data_template);
1043 ptr = NULL; 1075 ptr = NULL;
1044 } else { 1076 } else {
1045 skb = ieee80211_nullfunc_get(wl->hw, wl->vif); 1077 skb = ieee80211_nullfunc_get(wl->hw,
1078 wl12xx_wlvif_to_vif(wlvif));
1046 if (!skb) 1079 if (!skb)
1047 goto out; 1080 goto out;
1048 size = skb->len; 1081 size = skb->len;
@@ -1050,7 +1083,7 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
1050 } 1083 }
1051 1084
1052 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0, 1085 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
1053 wl->basic_rate); 1086 wlvif->basic_rate);
1054 1087
1055out: 1088out:
1056 dev_kfree_skb(skb); 1089 dev_kfree_skb(skb);
@@ -1061,19 +1094,21 @@ out:
1061 1094
1062} 1095}
1063 1096
1064int wl1271_cmd_build_klv_null_data(struct wl1271 *wl) 1097int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
1098 struct wl12xx_vif *wlvif)
1065{ 1099{
1100 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
1066 struct sk_buff *skb = NULL; 1101 struct sk_buff *skb = NULL;
1067 int ret = -ENOMEM; 1102 int ret = -ENOMEM;
1068 1103
1069 skb = ieee80211_nullfunc_get(wl->hw, wl->vif); 1104 skb = ieee80211_nullfunc_get(wl->hw, vif);
1070 if (!skb) 1105 if (!skb)
1071 goto out; 1106 goto out;
1072 1107
1073 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, 1108 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
1074 skb->data, skb->len, 1109 skb->data, skb->len,
1075 CMD_TEMPL_KLV_IDX_NULL_DATA, 1110 CMD_TEMPL_KLV_IDX_NULL_DATA,
1076 wl->basic_rate); 1111 wlvif->basic_rate);
1077 1112
1078out: 1113out:
1079 dev_kfree_skb(skb); 1114 dev_kfree_skb(skb);
@@ -1084,32 +1119,35 @@ out:
1084 1119
1085} 1120}
1086 1121
1087int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid) 1122int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1123 u16 aid)
1088{ 1124{
1125 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
1089 struct sk_buff *skb; 1126 struct sk_buff *skb;
1090 int ret = 0; 1127 int ret = 0;
1091 1128
1092 skb = ieee80211_pspoll_get(wl->hw, wl->vif); 1129 skb = ieee80211_pspoll_get(wl->hw, vif);
1093 if (!skb) 1130 if (!skb)
1094 goto out; 1131 goto out;
1095 1132
1096 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data, 1133 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
1097 skb->len, 0, wl->basic_rate_set); 1134 skb->len, 0, wlvif->basic_rate_set);
1098 1135
1099out: 1136out:
1100 dev_kfree_skb(skb); 1137 dev_kfree_skb(skb);
1101 return ret; 1138 return ret;
1102} 1139}
1103 1140
1104int wl1271_cmd_build_probe_req(struct wl1271 *wl, 1141int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1105 const u8 *ssid, size_t ssid_len, 1142 const u8 *ssid, size_t ssid_len,
1106 const u8 *ie, size_t ie_len, u8 band) 1143 const u8 *ie, size_t ie_len, u8 band)
1107{ 1144{
1145 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
1108 struct sk_buff *skb; 1146 struct sk_buff *skb;
1109 int ret; 1147 int ret;
1110 u32 rate; 1148 u32 rate;
1111 1149
1112 skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len, 1150 skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
1113 ie, ie_len); 1151 ie, ie_len);
1114 if (!skb) { 1152 if (!skb) {
1115 ret = -ENOMEM; 1153 ret = -ENOMEM;
@@ -1118,7 +1156,7 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl,
1118 1156
1119 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len); 1157 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
1120 1158
1121 rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); 1159 rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
1122 if (band == IEEE80211_BAND_2GHZ) 1160 if (band == IEEE80211_BAND_2GHZ)
1123 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, 1161 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
1124 skb->data, skb->len, 0, rate); 1162 skb->data, skb->len, 0, rate);
@@ -1132,20 +1170,22 @@ out:
1132} 1170}
1133 1171
1134struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, 1172struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
1173 struct wl12xx_vif *wlvif,
1135 struct sk_buff *skb) 1174 struct sk_buff *skb)
1136{ 1175{
1176 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
1137 int ret; 1177 int ret;
1138 u32 rate; 1178 u32 rate;
1139 1179
1140 if (!skb) 1180 if (!skb)
1141 skb = ieee80211_ap_probereq_get(wl->hw, wl->vif); 1181 skb = ieee80211_ap_probereq_get(wl->hw, vif);
1142 if (!skb) 1182 if (!skb)
1143 goto out; 1183 goto out;
1144 1184
1145 wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len); 1185 wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
1146 1186
1147 rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[wl->band]); 1187 rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
1148 if (wl->band == IEEE80211_BAND_2GHZ) 1188 if (wlvif->band == IEEE80211_BAND_2GHZ)
1149 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, 1189 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
1150 skb->data, skb->len, 0, rate); 1190 skb->data, skb->len, 0, rate);
1151 else 1191 else
@@ -1159,9 +1199,11 @@ out:
1159 return skb; 1199 return skb;
1160} 1200}
1161 1201
1162int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr) 1202int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1203 __be32 ip_addr)
1163{ 1204{
1164 int ret; 1205 int ret;
1206 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
1165 struct wl12xx_arp_rsp_template tmpl; 1207 struct wl12xx_arp_rsp_template tmpl;
1166 struct ieee80211_hdr_3addr *hdr; 1208 struct ieee80211_hdr_3addr *hdr;
1167 struct arphdr *arp_hdr; 1209 struct arphdr *arp_hdr;
@@ -1173,8 +1215,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
1173 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 1215 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1174 IEEE80211_STYPE_DATA | 1216 IEEE80211_STYPE_DATA |
1175 IEEE80211_FCTL_TODS); 1217 IEEE80211_FCTL_TODS);
1176 memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN); 1218 memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
1177 memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN); 1219 memcpy(hdr->addr2, vif->addr, ETH_ALEN);
1178 memset(hdr->addr3, 0xff, ETH_ALEN); 1220 memset(hdr->addr3, 0xff, ETH_ALEN);
1179 1221
1180 /* llc layer */ 1222 /* llc layer */
@@ -1190,25 +1232,26 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
1190 arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY); 1232 arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
1191 1233
1192 /* arp payload */ 1234 /* arp payload */
1193 memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN); 1235 memcpy(tmpl.sender_hw, vif->addr, ETH_ALEN);
1194 tmpl.sender_ip = ip_addr; 1236 tmpl.sender_ip = ip_addr;
1195 1237
1196 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP, 1238 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP,
1197 &tmpl, sizeof(tmpl), 0, 1239 &tmpl, sizeof(tmpl), 0,
1198 wl->basic_rate); 1240 wlvif->basic_rate);
1199 1241
1200 return ret; 1242 return ret;
1201} 1243}
1202 1244
1203int wl1271_build_qos_null_data(struct wl1271 *wl) 1245int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1204{ 1246{
1247 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
1205 struct ieee80211_qos_hdr template; 1248 struct ieee80211_qos_hdr template;
1206 1249
1207 memset(&template, 0, sizeof(template)); 1250 memset(&template, 0, sizeof(template));
1208 1251
1209 memcpy(template.addr1, wl->bssid, ETH_ALEN); 1252 memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN);
1210 memcpy(template.addr2, wl->mac_addr, ETH_ALEN); 1253 memcpy(template.addr2, vif->addr, ETH_ALEN);
1211 memcpy(template.addr3, wl->bssid, ETH_ALEN); 1254 memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN);
1212 1255
1213 template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 1256 template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1214 IEEE80211_STYPE_QOS_NULLFUNC | 1257 IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1219,7 +1262,7 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
1219 1262
1220 return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template, 1263 return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
1221 sizeof(template), 0, 1264 sizeof(template), 0,
1222 wl->basic_rate); 1265 wlvif->basic_rate);
1223} 1266}
1224 1267
1225int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid) 1268int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
@@ -1253,7 +1296,8 @@ out:
1253 return ret; 1296 return ret;
1254} 1297}
1255 1298
1256int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 1299int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1300 u16 action, u8 id, u8 key_type,
1257 u8 key_size, const u8 *key, const u8 *addr, 1301 u8 key_size, const u8 *key, const u8 *addr,
1258 u32 tx_seq_32, u16 tx_seq_16) 1302 u32 tx_seq_32, u16 tx_seq_16)
1259{ 1303{
@@ -1261,7 +1305,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
1261 int ret = 0; 1305 int ret = 0;
1262 1306
1263 /* hlid might have already been deleted */ 1307 /* hlid might have already been deleted */
1264 if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) 1308 if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
1265 return 0; 1309 return 0;
1266 1310
1267 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1311 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1270,7 +1314,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
1270 goto out; 1314 goto out;
1271 } 1315 }
1272 1316
1273 cmd->hlid = wl->sta_hlid; 1317 cmd->hlid = wlvif->sta.hlid;
1274 1318
1275 if (key_type == KEY_WEP) 1319 if (key_type == KEY_WEP)
1276 cmd->lid_key_type = WEP_DEFAULT_LID_TYPE; 1320 cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
@@ -1321,9 +1365,10 @@ out:
1321 * TODO: merge with sta/ibss into 1 set_key function. 1365 * TODO: merge with sta/ibss into 1 set_key function.
1322 * note there are slight diffs 1366 * note there are slight diffs
1323 */ 1367 */
1324int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 1368int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1325 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, 1369 u16 action, u8 id, u8 key_type,
1326 u16 tx_seq_16) 1370 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
1371 u16 tx_seq_16)
1327{ 1372{
1328 struct wl1271_cmd_set_keys *cmd; 1373 struct wl1271_cmd_set_keys *cmd;
1329 int ret = 0; 1374 int ret = 0;
@@ -1333,7 +1378,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
1333 if (!cmd) 1378 if (!cmd)
1334 return -ENOMEM; 1379 return -ENOMEM;
1335 1380
1336 if (hlid == wl->ap_bcast_hlid) { 1381 if (hlid == wlvif->ap.bcast_hlid) {
1337 if (key_type == KEY_WEP) 1382 if (key_type == KEY_WEP)
1338 lid_type = WEP_DEFAULT_LID_TYPE; 1383 lid_type = WEP_DEFAULT_LID_TYPE;
1339 else 1384 else
@@ -1411,7 +1456,8 @@ out:
1411 return ret; 1456 return ret;
1412} 1457}
1413 1458
1414int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid) 1459int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1460 struct ieee80211_sta *sta, u8 hlid)
1415{ 1461{
1416 struct wl12xx_cmd_add_peer *cmd; 1462 struct wl12xx_cmd_add_peer *cmd;
1417 int i, ret; 1463 int i, ret;
@@ -1438,13 +1484,13 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
1438 else 1484 else
1439 cmd->psd_type[i] = WL1271_PSD_LEGACY; 1485 cmd->psd_type[i] = WL1271_PSD_LEGACY;
1440 1486
1441 sta_rates = sta->supp_rates[wl->band]; 1487 sta_rates = sta->supp_rates[wlvif->band];
1442 if (sta->ht_cap.ht_supported) 1488 if (sta->ht_cap.ht_supported)
1443 sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET; 1489 sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
1444 1490
1445 cmd->supported_rates = 1491 cmd->supported_rates =
1446 cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates, 1492 cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
1447 wl->band)); 1493 wlvif->band));
1448 1494
1449 wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x", 1495 wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
1450 cmd->supported_rates, sta->uapsd_queues); 1496 cmd->supported_rates, sta->uapsd_queues);
@@ -1584,12 +1630,13 @@ out:
1584 return ret; 1630 return ret;
1585} 1631}
1586 1632
1587static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id) 1633static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1634 u8 role_id)
1588{ 1635{
1589 struct wl12xx_cmd_roc *cmd; 1636 struct wl12xx_cmd_roc *cmd;
1590 int ret = 0; 1637 int ret = 0;
1591 1638
1592 wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id); 1639 wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id);
1593 1640
1594 if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID)) 1641 if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
1595 return -EINVAL; 1642 return -EINVAL;
@@ -1601,8 +1648,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
1601 } 1648 }
1602 1649
1603 cmd->role_id = role_id; 1650 cmd->role_id = role_id;
1604 cmd->channel = wl->channel; 1651 cmd->channel = wlvif->channel;
1605 switch (wl->band) { 1652 switch (wlvif->band) {
1606 case IEEE80211_BAND_2GHZ: 1653 case IEEE80211_BAND_2GHZ:
1607 cmd->band = RADIO_BAND_2_4GHZ; 1654 cmd->band = RADIO_BAND_2_4GHZ;
1608 break; 1655 break;
@@ -1610,7 +1657,7 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
1610 cmd->band = RADIO_BAND_5GHZ; 1657 cmd->band = RADIO_BAND_5GHZ;
1611 break; 1658 break;
1612 default: 1659 default:
1613 wl1271_error("roc - unknown band: %d", (int)wl->band); 1660 wl1271_error("roc - unknown band: %d", (int)wlvif->band);
1614 ret = -EINVAL; 1661 ret = -EINVAL;
1615 goto out_free; 1662 goto out_free;
1616 } 1663 }
@@ -1657,14 +1704,14 @@ out:
1657 return ret; 1704 return ret;
1658} 1705}
1659 1706
1660int wl12xx_roc(struct wl1271 *wl, u8 role_id) 1707int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
1661{ 1708{
1662 int ret = 0; 1709 int ret = 0;
1663 1710
1664 if (WARN_ON(test_bit(role_id, wl->roc_map))) 1711 if (WARN_ON(test_bit(role_id, wl->roc_map)))
1665 return 0; 1712 return 0;
1666 1713
1667 ret = wl12xx_cmd_roc(wl, role_id); 1714 ret = wl12xx_cmd_roc(wl, wlvif, role_id);
1668 if (ret < 0) 1715 if (ret < 0)
1669 goto out; 1716 goto out;
1670 1717
@@ -1753,3 +1800,50 @@ out_free:
1753out: 1800out:
1754 return ret; 1801 return ret;
1755} 1802}
1803
1804/* start dev role and roc on its channel */
1805int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1806{
1807 int ret;
1808
1809 if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
1810 wlvif->bss_type == BSS_TYPE_IBSS)))
1811 return -EINVAL;
1812
1813 ret = wl12xx_cmd_role_start_dev(wl, wlvif);
1814 if (ret < 0)
1815 goto out;
1816
1817 ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
1818 if (ret < 0)
1819 goto out_stop;
1820
1821 return 0;
1822
1823out_stop:
1824 wl12xx_cmd_role_stop_dev(wl, wlvif);
1825out:
1826 return ret;
1827}
1828
1829/* croc dev hlid, and stop the role */
1830int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1831{
1832 int ret;
1833
1834 if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
1835 wlvif->bss_type == BSS_TYPE_IBSS)))
1836 return -EINVAL;
1837
1838 if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
1839 ret = wl12xx_croc(wl, wlvif->dev_role_id);
1840 if (ret < 0)
1841 goto out;
1842 }
1843
1844 ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
1845 if (ret < 0)
1846 goto out;
1847out:
1848 return ret;
1849}
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index b7bd42769aa7..3f7d0b93c24d 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -36,45 +36,54 @@ int wl128x_cmd_general_parms(struct wl1271 *wl);
36int wl1271_cmd_radio_parms(struct wl1271 *wl); 36int wl1271_cmd_radio_parms(struct wl1271 *wl);
37int wl128x_cmd_radio_parms(struct wl1271 *wl); 37int wl128x_cmd_radio_parms(struct wl1271 *wl);
38int wl1271_cmd_ext_radio_parms(struct wl1271 *wl); 38int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
39int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id); 39int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
40 u8 *role_id);
40int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id); 41int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
41int wl12xx_cmd_role_start_dev(struct wl1271 *wl); 42int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
42int wl12xx_cmd_role_stop_dev(struct wl1271 *wl); 43int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
43int wl12xx_cmd_role_start_sta(struct wl1271 *wl); 44int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
44int wl12xx_cmd_role_stop_sta(struct wl1271 *wl); 45int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
45int wl12xx_cmd_role_start_ap(struct wl1271 *wl); 46int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
46int wl12xx_cmd_role_stop_ap(struct wl1271 *wl); 47int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
47int wl12xx_cmd_role_start_ibss(struct wl1271 *wl); 48int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
48int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 49int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
49int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 50int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
50int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 51int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
51int wl1271_cmd_data_path(struct wl1271 *wl, bool enable); 52int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
52int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode); 53int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
54 u8 ps_mode);
53int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 55int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
54 size_t len); 56 size_t len);
55int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 57int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
56 void *buf, size_t buf_len, int index, u32 rates); 58 void *buf, size_t buf_len, int index, u32 rates);
57int wl1271_cmd_build_null_data(struct wl1271 *wl); 59int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif);
58int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid); 60int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
59int wl1271_cmd_build_probe_req(struct wl1271 *wl, 61 u16 aid);
62int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
60 const u8 *ssid, size_t ssid_len, 63 const u8 *ssid, size_t ssid_len,
61 const u8 *ie, size_t ie_len, u8 band); 64 const u8 *ie, size_t ie_len, u8 band);
62struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, 65struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
66 struct wl12xx_vif *wlvif,
63 struct sk_buff *skb); 67 struct sk_buff *skb);
64int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr); 68int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
65int wl1271_build_qos_null_data(struct wl1271 *wl); 69 __be32 ip_addr);
66int wl1271_cmd_build_klv_null_data(struct wl1271 *wl); 70int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif);
71int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
72 struct wl12xx_vif *wlvif);
67int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid); 73int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid);
68int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 74int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
75 u16 action, u8 id, u8 key_type,
69 u8 key_size, const u8 *key, const u8 *addr, 76 u8 key_size, const u8 *key, const u8 *addr,
70 u32 tx_seq_32, u16 tx_seq_16); 77 u32 tx_seq_32, u16 tx_seq_16);
71int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 78int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
79 u16 action, u8 id, u8 key_type,
72 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, 80 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
73 u16 tx_seq_16); 81 u16 tx_seq_16);
74int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid); 82int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
75int wl12xx_roc(struct wl1271 *wl, u8 role_id); 83int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id);
76int wl12xx_croc(struct wl1271 *wl, u8 role_id); 84int wl12xx_croc(struct wl1271 *wl, u8 role_id);
77int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid); 85int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
86 struct ieee80211_sta *sta, u8 hlid);
78int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid); 87int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
79int wl12xx_cmd_config_fwlog(struct wl1271 *wl); 88int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
80int wl12xx_cmd_start_fwlog(struct wl1271 *wl); 89int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
@@ -82,6 +91,9 @@ int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
82int wl12xx_cmd_channel_switch(struct wl1271 *wl, 91int wl12xx_cmd_channel_switch(struct wl1271 *wl,
83 struct ieee80211_channel_switch *ch_switch); 92 struct ieee80211_channel_switch *ch_switch);
84int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl); 93int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
94int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
95 u8 *hlid);
96void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
85 97
86enum wl1271_commands { 98enum wl1271_commands {
87 CMD_INTERROGATE = 1, /*use this to read information elements*/ 99 CMD_INTERROGATE = 1, /*use this to read information elements*/
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 04bb8fbf93f9..1bcfb017058d 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -440,6 +440,10 @@ struct conf_rx_settings {
440 CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \ 440 CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
441 CONF_HW_BIT_RATE_54MBPS) 441 CONF_HW_BIT_RATE_54MBPS)
442 442
443#define CONF_TX_CCK_RATES (CONF_HW_BIT_RATE_1MBPS | \
444 CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
445 CONF_HW_BIT_RATE_11MBPS)
446
443#define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS | \ 447#define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS | \
444 CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS | \ 448 CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS | \
445 CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \ 449 CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/wl12xx/debug.h
new file mode 100644
index 000000000000..b85fd8c41e8f
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/debug.h
@@ -0,0 +1,101 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2011 Texas Instruments. All rights reserved.
5 * Copyright (C) 2008-2009 Nokia Corporation
6 *
7 * Contact: Luciano Coelho <coelho@ti.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25#ifndef __DEBUG_H__
26#define __DEBUG_H__
27
28#include <linux/bitops.h>
29#include <linux/printk.h>
30
31#define DRIVER_NAME "wl12xx"
32#define DRIVER_PREFIX DRIVER_NAME ": "
33
34enum {
35 DEBUG_NONE = 0,
36 DEBUG_IRQ = BIT(0),
37 DEBUG_SPI = BIT(1),
38 DEBUG_BOOT = BIT(2),
39 DEBUG_MAILBOX = BIT(3),
40 DEBUG_TESTMODE = BIT(4),
41 DEBUG_EVENT = BIT(5),
42 DEBUG_TX = BIT(6),
43 DEBUG_RX = BIT(7),
44 DEBUG_SCAN = BIT(8),
45 DEBUG_CRYPT = BIT(9),
46 DEBUG_PSM = BIT(10),
47 DEBUG_MAC80211 = BIT(11),
48 DEBUG_CMD = BIT(12),
49 DEBUG_ACX = BIT(13),
50 DEBUG_SDIO = BIT(14),
51 DEBUG_FILTERS = BIT(15),
52 DEBUG_ADHOC = BIT(16),
53 DEBUG_AP = BIT(17),
54 DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
55 DEBUG_ALL = ~0,
56};
57
58extern u32 wl12xx_debug_level;
59
60#define DEBUG_DUMP_LIMIT 1024
61
62#define wl1271_error(fmt, arg...) \
63 pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
64
65#define wl1271_warning(fmt, arg...) \
66 pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
67
68#define wl1271_notice(fmt, arg...) \
69 pr_info(DRIVER_PREFIX fmt "\n", ##arg)
70
71#define wl1271_info(fmt, arg...) \
72 pr_info(DRIVER_PREFIX fmt "\n", ##arg)
73
74#define wl1271_debug(level, fmt, arg...) \
75 do { \
76 if (level & wl12xx_debug_level) \
77 pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
78 } while (0)
79
80/* TODO: use pr_debug_hex_dump when it becomes available */
81#define wl1271_dump(level, prefix, buf, len) \
82 do { \
83 if (level & wl12xx_debug_level) \
84 print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
85 DUMP_PREFIX_OFFSET, 16, 1, \
86 buf, \
87 min_t(size_t, len, DEBUG_DUMP_LIMIT), \
88 0); \
89 } while (0)
90
91#define wl1271_dump_ascii(level, prefix, buf, len) \
92 do { \
93 if (level & wl12xx_debug_level) \
94 print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
95 DUMP_PREFIX_OFFSET, 16, 1, \
96 buf, \
97 min_t(size_t, len, DEBUG_DUMP_LIMIT), \
98 true); \
99 } while (0)
100
101#endif /* __DEBUG_H__ */
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index 3999fd528302..15eb3a9c30ca 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29#include "wl12xx.h" 29#include "wl12xx.h"
30#include "debug.h"
30#include "acx.h" 31#include "acx.h"
31#include "ps.h" 32#include "ps.h"
32#include "io.h" 33#include "io.h"
@@ -316,12 +317,19 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
316{ 317{
317 struct wl1271 *wl = file->private_data; 318 struct wl1271 *wl = file->private_data;
318 int res = 0; 319 int res = 0;
319 char buf[1024]; 320 ssize_t ret;
321 char *buf;
322
323#define DRIVER_STATE_BUF_LEN 1024
324
325 buf = kmalloc(DRIVER_STATE_BUF_LEN, GFP_KERNEL);
326 if (!buf)
327 return -ENOMEM;
320 328
321 mutex_lock(&wl->mutex); 329 mutex_lock(&wl->mutex);
322 330
323#define DRIVER_STATE_PRINT(x, fmt) \ 331#define DRIVER_STATE_PRINT(x, fmt) \
324 (res += scnprintf(buf + res, sizeof(buf) - res,\ 332 (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
325 #x " = " fmt "\n", wl->x)) 333 #x " = " fmt "\n", wl->x))
326 334
327#define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld") 335#define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
@@ -346,29 +354,14 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
346 DRIVER_STATE_PRINT_INT(tx_results_count); 354 DRIVER_STATE_PRINT_INT(tx_results_count);
347 DRIVER_STATE_PRINT_LHEX(flags); 355 DRIVER_STATE_PRINT_LHEX(flags);
348 DRIVER_STATE_PRINT_INT(tx_blocks_freed); 356 DRIVER_STATE_PRINT_INT(tx_blocks_freed);
349 DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
350 DRIVER_STATE_PRINT_INT(rx_counter); 357 DRIVER_STATE_PRINT_INT(rx_counter);
351 DRIVER_STATE_PRINT_INT(session_counter);
352 DRIVER_STATE_PRINT_INT(state); 358 DRIVER_STATE_PRINT_INT(state);
353 DRIVER_STATE_PRINT_INT(bss_type);
354 DRIVER_STATE_PRINT_INT(channel); 359 DRIVER_STATE_PRINT_INT(channel);
355 DRIVER_STATE_PRINT_HEX(rate_set);
356 DRIVER_STATE_PRINT_HEX(basic_rate_set);
357 DRIVER_STATE_PRINT_HEX(basic_rate);
358 DRIVER_STATE_PRINT_INT(band); 360 DRIVER_STATE_PRINT_INT(band);
359 DRIVER_STATE_PRINT_INT(beacon_int);
360 DRIVER_STATE_PRINT_INT(psm_entry_retry);
361 DRIVER_STATE_PRINT_INT(ps_poll_failures);
362 DRIVER_STATE_PRINT_INT(power_level); 361 DRIVER_STATE_PRINT_INT(power_level);
363 DRIVER_STATE_PRINT_INT(rssi_thold);
364 DRIVER_STATE_PRINT_INT(last_rssi_event);
365 DRIVER_STATE_PRINT_INT(sg_enabled); 362 DRIVER_STATE_PRINT_INT(sg_enabled);
366 DRIVER_STATE_PRINT_INT(enable_11a); 363 DRIVER_STATE_PRINT_INT(enable_11a);
367 DRIVER_STATE_PRINT_INT(noise); 364 DRIVER_STATE_PRINT_INT(noise);
368 DRIVER_STATE_PRINT_LHEX(ap_hlid_map[0]);
369 DRIVER_STATE_PRINT_INT(last_tx_hlid);
370 DRIVER_STATE_PRINT_INT(ba_support);
371 DRIVER_STATE_PRINT_HEX(ba_rx_bitmap);
372 DRIVER_STATE_PRINT_HEX(ap_fw_ps_map); 365 DRIVER_STATE_PRINT_HEX(ap_fw_ps_map);
373 DRIVER_STATE_PRINT_LHEX(ap_ps_map); 366 DRIVER_STATE_PRINT_LHEX(ap_ps_map);
374 DRIVER_STATE_PRINT_HEX(quirks); 367 DRIVER_STATE_PRINT_HEX(quirks);
@@ -387,10 +380,13 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
387#undef DRIVER_STATE_PRINT_LHEX 380#undef DRIVER_STATE_PRINT_LHEX
388#undef DRIVER_STATE_PRINT_STR 381#undef DRIVER_STATE_PRINT_STR
389#undef DRIVER_STATE_PRINT 382#undef DRIVER_STATE_PRINT
383#undef DRIVER_STATE_BUF_LEN
390 384
391 mutex_unlock(&wl->mutex); 385 mutex_unlock(&wl->mutex);
392 386
393 return simple_read_from_buffer(user_buf, count, ppos, buf, res); 387 ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
388 kfree(buf);
389 return ret;
394} 390}
395 391
396static const struct file_operations driver_state_ops = { 392static const struct file_operations driver_state_ops = {
@@ -399,6 +395,115 @@ static const struct file_operations driver_state_ops = {
399 .llseek = default_llseek, 395 .llseek = default_llseek,
400}; 396};
401 397
398static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
399 size_t count, loff_t *ppos)
400{
401 struct wl1271 *wl = file->private_data;
402 struct wl12xx_vif *wlvif;
403 int ret, res = 0;
404 const int buf_size = 4096;
405 char *buf;
406 char tmp_buf[64];
407
408 buf = kzalloc(buf_size, GFP_KERNEL);
409 if (!buf)
410 return -ENOMEM;
411
412 mutex_lock(&wl->mutex);
413
414#define VIF_STATE_PRINT(x, fmt) \
415 (res += scnprintf(buf + res, buf_size - res, \
416 #x " = " fmt "\n", wlvif->x))
417
418#define VIF_STATE_PRINT_LONG(x) VIF_STATE_PRINT(x, "%ld")
419#define VIF_STATE_PRINT_INT(x) VIF_STATE_PRINT(x, "%d")
420#define VIF_STATE_PRINT_STR(x) VIF_STATE_PRINT(x, "%s")
421#define VIF_STATE_PRINT_LHEX(x) VIF_STATE_PRINT(x, "0x%lx")
422#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx")
423#define VIF_STATE_PRINT_HEX(x) VIF_STATE_PRINT(x, "0x%x")
424
425#define VIF_STATE_PRINT_NSTR(x, len) \
426 do { \
427 memset(tmp_buf, 0, sizeof(tmp_buf)); \
428 memcpy(tmp_buf, wlvif->x, \
429 min_t(u8, len, sizeof(tmp_buf) - 1)); \
430 res += scnprintf(buf + res, buf_size - res, \
431 #x " = %s\n", tmp_buf); \
432 } while (0)
433
434 wl12xx_for_each_wlvif(wl, wlvif) {
435 VIF_STATE_PRINT_INT(role_id);
436 VIF_STATE_PRINT_INT(bss_type);
437 VIF_STATE_PRINT_LHEX(flags);
438 VIF_STATE_PRINT_INT(p2p);
439 VIF_STATE_PRINT_INT(dev_role_id);
440 VIF_STATE_PRINT_INT(dev_hlid);
441
442 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
443 wlvif->bss_type == BSS_TYPE_IBSS) {
444 VIF_STATE_PRINT_INT(sta.hlid);
445 VIF_STATE_PRINT_INT(sta.ba_rx_bitmap);
446 VIF_STATE_PRINT_INT(sta.basic_rate_idx);
447 VIF_STATE_PRINT_INT(sta.ap_rate_idx);
448 VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
449 } else {
450 VIF_STATE_PRINT_INT(ap.global_hlid);
451 VIF_STATE_PRINT_INT(ap.bcast_hlid);
452 VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]);
453 VIF_STATE_PRINT_INT(ap.mgmt_rate_idx);
454 VIF_STATE_PRINT_INT(ap.bcast_rate_idx);
455 VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]);
456 VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]);
457 VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]);
458 VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
459 }
460 VIF_STATE_PRINT_INT(last_tx_hlid);
461 VIF_STATE_PRINT_LHEX(links_map[0]);
462 VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
463 VIF_STATE_PRINT_INT(band);
464 VIF_STATE_PRINT_INT(channel);
465 VIF_STATE_PRINT_HEX(bitrate_masks[0]);
466 VIF_STATE_PRINT_HEX(bitrate_masks[1]);
467 VIF_STATE_PRINT_HEX(basic_rate_set);
468 VIF_STATE_PRINT_HEX(basic_rate);
469 VIF_STATE_PRINT_HEX(rate_set);
470 VIF_STATE_PRINT_INT(beacon_int);
471 VIF_STATE_PRINT_INT(default_key);
472 VIF_STATE_PRINT_INT(aid);
473 VIF_STATE_PRINT_INT(session_counter);
474 VIF_STATE_PRINT_INT(ps_poll_failures);
475 VIF_STATE_PRINT_INT(psm_entry_retry);
476 VIF_STATE_PRINT_INT(power_level);
477 VIF_STATE_PRINT_INT(rssi_thold);
478 VIF_STATE_PRINT_INT(last_rssi_event);
479 VIF_STATE_PRINT_INT(ba_support);
480 VIF_STATE_PRINT_INT(ba_allowed);
481 VIF_STATE_PRINT_LLHEX(tx_security_seq);
482 VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
483 }
484
485#undef VIF_STATE_PRINT_INT
486#undef VIF_STATE_PRINT_LONG
487#undef VIF_STATE_PRINT_HEX
488#undef VIF_STATE_PRINT_LHEX
489#undef VIF_STATE_PRINT_LLHEX
490#undef VIF_STATE_PRINT_STR
491#undef VIF_STATE_PRINT_NSTR
492#undef VIF_STATE_PRINT
493
494 mutex_unlock(&wl->mutex);
495
496 ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
497 kfree(buf);
498 return ret;
499}
500
501static const struct file_operations vifs_state_ops = {
502 .read = vifs_state_read,
503 .open = wl1271_open_file_generic,
504 .llseek = default_llseek,
505};
506
402static ssize_t dtim_interval_read(struct file *file, char __user *user_buf, 507static ssize_t dtim_interval_read(struct file *file, char __user *user_buf,
403 size_t count, loff_t *ppos) 508 size_t count, loff_t *ppos)
404{ 509{
@@ -520,6 +625,7 @@ static ssize_t rx_streaming_interval_write(struct file *file,
520 size_t count, loff_t *ppos) 625 size_t count, loff_t *ppos)
521{ 626{
522 struct wl1271 *wl = file->private_data; 627 struct wl1271 *wl = file->private_data;
628 struct wl12xx_vif *wlvif;
523 unsigned long value; 629 unsigned long value;
524 int ret; 630 int ret;
525 631
@@ -543,7 +649,9 @@ static ssize_t rx_streaming_interval_write(struct file *file,
543 if (ret < 0) 649 if (ret < 0)
544 goto out; 650 goto out;
545 651
546 wl1271_recalc_rx_streaming(wl); 652 wl12xx_for_each_wlvif_sta(wl, wlvif) {
653 wl1271_recalc_rx_streaming(wl, wlvif);
654 }
547 655
548 wl1271_ps_elp_sleep(wl); 656 wl1271_ps_elp_sleep(wl);
549out: 657out:
@@ -572,6 +680,7 @@ static ssize_t rx_streaming_always_write(struct file *file,
572 size_t count, loff_t *ppos) 680 size_t count, loff_t *ppos)
573{ 681{
574 struct wl1271 *wl = file->private_data; 682 struct wl1271 *wl = file->private_data;
683 struct wl12xx_vif *wlvif;
575 unsigned long value; 684 unsigned long value;
576 int ret; 685 int ret;
577 686
@@ -595,7 +704,9 @@ static ssize_t rx_streaming_always_write(struct file *file,
595 if (ret < 0) 704 if (ret < 0)
596 goto out; 705 goto out;
597 706
598 wl1271_recalc_rx_streaming(wl); 707 wl12xx_for_each_wlvif_sta(wl, wlvif) {
708 wl1271_recalc_rx_streaming(wl, wlvif);
709 }
599 710
600 wl1271_ps_elp_sleep(wl); 711 wl1271_ps_elp_sleep(wl);
601out: 712out:
@@ -624,6 +735,7 @@ static ssize_t beacon_filtering_write(struct file *file,
624 size_t count, loff_t *ppos) 735 size_t count, loff_t *ppos)
625{ 736{
626 struct wl1271 *wl = file->private_data; 737 struct wl1271 *wl = file->private_data;
738 struct wl12xx_vif *wlvif;
627 char buf[10]; 739 char buf[10];
628 size_t len; 740 size_t len;
629 unsigned long value; 741 unsigned long value;
@@ -646,7 +758,9 @@ static ssize_t beacon_filtering_write(struct file *file,
646 if (ret < 0) 758 if (ret < 0)
647 goto out; 759 goto out;
648 760
649 ret = wl1271_acx_beacon_filter_opt(wl, !!value); 761 wl12xx_for_each_wlvif(wl, wlvif) {
762 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
763 }
650 764
651 wl1271_ps_elp_sleep(wl); 765 wl1271_ps_elp_sleep(wl);
652out: 766out:
@@ -770,6 +884,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
770 DEBUGFS_ADD(gpio_power, rootdir); 884 DEBUGFS_ADD(gpio_power, rootdir);
771 DEBUGFS_ADD(start_recovery, rootdir); 885 DEBUGFS_ADD(start_recovery, rootdir);
772 DEBUGFS_ADD(driver_state, rootdir); 886 DEBUGFS_ADD(driver_state, rootdir);
887 DEBUGFS_ADD(vifs_state, rootdir);
773 DEBUGFS_ADD(dtim_interval, rootdir); 888 DEBUGFS_ADD(dtim_interval, rootdir);
774 DEBUGFS_ADD(beacon_interval, rootdir); 889 DEBUGFS_ADD(beacon_interval, rootdir);
775 DEBUGFS_ADD(beacon_filtering, rootdir); 890 DEBUGFS_ADD(beacon_filtering, rootdir);
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index 674ad2a9e409..00ce794eebae 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -22,6 +22,7 @@
22 */ 22 */
23 23
24#include "wl12xx.h" 24#include "wl12xx.h"
25#include "debug.h"
25#include "reg.h" 26#include "reg.h"
26#include "io.h" 27#include "io.h"
27#include "event.h" 28#include "event.h"
@@ -31,12 +32,16 @@
31 32
32void wl1271_pspoll_work(struct work_struct *work) 33void wl1271_pspoll_work(struct work_struct *work)
33{ 34{
35 struct ieee80211_vif *vif;
36 struct wl12xx_vif *wlvif;
34 struct delayed_work *dwork; 37 struct delayed_work *dwork;
35 struct wl1271 *wl; 38 struct wl1271 *wl;
36 int ret; 39 int ret;
37 40
38 dwork = container_of(work, struct delayed_work, work); 41 dwork = container_of(work, struct delayed_work, work);
39 wl = container_of(dwork, struct wl1271, pspoll_work); 42 wlvif = container_of(dwork, struct wl12xx_vif, pspoll_work);
43 vif = container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
44 wl = wlvif->wl;
40 45
41 wl1271_debug(DEBUG_EVENT, "pspoll work"); 46 wl1271_debug(DEBUG_EVENT, "pspoll work");
42 47
@@ -45,10 +50,10 @@ void wl1271_pspoll_work(struct work_struct *work)
45 if (unlikely(wl->state == WL1271_STATE_OFF)) 50 if (unlikely(wl->state == WL1271_STATE_OFF))
46 goto out; 51 goto out;
47 52
48 if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags)) 53 if (!test_and_clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags))
49 goto out; 54 goto out;
50 55
51 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 56 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
52 goto out; 57 goto out;
53 58
54 /* 59 /*
@@ -60,31 +65,33 @@ void wl1271_pspoll_work(struct work_struct *work)
60 if (ret < 0) 65 if (ret < 0)
61 goto out; 66 goto out;
62 67
63 wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true); 68 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
69 wlvif->basic_rate, true);
64 70
65 wl1271_ps_elp_sleep(wl); 71 wl1271_ps_elp_sleep(wl);
66out: 72out:
67 mutex_unlock(&wl->mutex); 73 mutex_unlock(&wl->mutex);
68}; 74};
69 75
70static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl) 76static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl,
77 struct wl12xx_vif *wlvif)
71{ 78{
72 int delay = wl->conf.conn.ps_poll_recovery_period; 79 int delay = wl->conf.conn.ps_poll_recovery_period;
73 int ret; 80 int ret;
74 81
75 wl->ps_poll_failures++; 82 wlvif->ps_poll_failures++;
76 if (wl->ps_poll_failures == 1) 83 if (wlvif->ps_poll_failures == 1)
77 wl1271_info("AP with dysfunctional ps-poll, " 84 wl1271_info("AP with dysfunctional ps-poll, "
78 "trying to work around it."); 85 "trying to work around it.");
79 86
80 /* force active mode receive data from the AP */ 87 /* force active mode receive data from the AP */
81 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) { 88 if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
82 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, 89 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
83 wl->basic_rate, true); 90 wlvif->basic_rate, true);
84 if (ret < 0) 91 if (ret < 0)
85 return; 92 return;
86 set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags); 93 set_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
87 ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work, 94 ieee80211_queue_delayed_work(wl->hw, &wlvif->pspoll_work,
88 msecs_to_jiffies(delay)); 95 msecs_to_jiffies(delay));
89 } 96 }
90 97
@@ -97,6 +104,7 @@ static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
97} 104}
98 105
99static int wl1271_event_ps_report(struct wl1271 *wl, 106static int wl1271_event_ps_report(struct wl1271 *wl,
107 struct wl12xx_vif *wlvif,
100 struct event_mailbox *mbox, 108 struct event_mailbox *mbox,
101 bool *beacon_loss) 109 bool *beacon_loss)
102{ 110{
@@ -109,41 +117,37 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
109 case EVENT_ENTER_POWER_SAVE_FAIL: 117 case EVENT_ENTER_POWER_SAVE_FAIL:
110 wl1271_debug(DEBUG_PSM, "PSM entry failed"); 118 wl1271_debug(DEBUG_PSM, "PSM entry failed");
111 119
112 if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) { 120 if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
113 /* remain in active mode */ 121 /* remain in active mode */
114 wl->psm_entry_retry = 0; 122 wlvif->psm_entry_retry = 0;
115 break; 123 break;
116 } 124 }
117 125
118 if (wl->psm_entry_retry < total_retries) { 126 if (wlvif->psm_entry_retry < total_retries) {
119 wl->psm_entry_retry++; 127 wlvif->psm_entry_retry++;
120 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, 128 ret = wl1271_ps_set_mode(wl, wlvif,
121 wl->basic_rate, true); 129 STATION_POWER_SAVE_MODE,
130 wlvif->basic_rate, true);
122 } else { 131 } else {
123 wl1271_info("No ack to nullfunc from AP."); 132 wl1271_info("No ack to nullfunc from AP.");
124 wl->psm_entry_retry = 0; 133 wlvif->psm_entry_retry = 0;
125 *beacon_loss = true; 134 *beacon_loss = true;
126 } 135 }
127 break; 136 break;
128 case EVENT_ENTER_POWER_SAVE_SUCCESS: 137 case EVENT_ENTER_POWER_SAVE_SUCCESS:
129 wl->psm_entry_retry = 0; 138 wlvif->psm_entry_retry = 0;
130
131 /* enable beacon filtering */
132 ret = wl1271_acx_beacon_filter_opt(wl, true);
133 if (ret < 0)
134 break;
135 139
136 /* 140 /*
137 * BET has only a minor effect in 5GHz and masks 141 * BET has only a minor effect in 5GHz and masks
138 * channel switch IEs, so we only enable BET on 2.4GHz 142 * channel switch IEs, so we only enable BET on 2.4GHz
139 */ 143 */
140 if (wl->band == IEEE80211_BAND_2GHZ) 144 if (wlvif->band == IEEE80211_BAND_2GHZ)
141 /* enable beacon early termination */ 145 /* enable beacon early termination */
142 ret = wl1271_acx_bet_enable(wl, true); 146 ret = wl1271_acx_bet_enable(wl, wlvif, true);
143 147
144 if (wl->ps_compl) { 148 if (wlvif->ps_compl) {
145 complete(wl->ps_compl); 149 complete(wlvif->ps_compl);
146 wl->ps_compl = NULL; 150 wlvif->ps_compl = NULL;
147 } 151 }
148 break; 152 break;
149 default: 153 default:
@@ -154,39 +158,44 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
154} 158}
155 159
156static void wl1271_event_rssi_trigger(struct wl1271 *wl, 160static void wl1271_event_rssi_trigger(struct wl1271 *wl,
161 struct wl12xx_vif *wlvif,
157 struct event_mailbox *mbox) 162 struct event_mailbox *mbox)
158{ 163{
164 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
159 enum nl80211_cqm_rssi_threshold_event event; 165 enum nl80211_cqm_rssi_threshold_event event;
160 s8 metric = mbox->rssi_snr_trigger_metric[0]; 166 s8 metric = mbox->rssi_snr_trigger_metric[0];
161 167
162 wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric); 168 wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
163 169
164 if (metric <= wl->rssi_thold) 170 if (metric <= wlvif->rssi_thold)
165 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; 171 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
166 else 172 else
167 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; 173 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
168 174
169 if (event != wl->last_rssi_event) 175 if (event != wlvif->last_rssi_event)
170 ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL); 176 ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
171 wl->last_rssi_event = event; 177 wlvif->last_rssi_event = event;
172} 178}
173 179
174static void wl1271_stop_ba_event(struct wl1271 *wl) 180static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
175{ 181{
176 if (wl->bss_type != BSS_TYPE_AP_BSS) { 182 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
177 if (!wl->ba_rx_bitmap) 183
184 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
185 if (!wlvif->sta.ba_rx_bitmap)
178 return; 186 return;
179 ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap, 187 ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap,
180 wl->bssid); 188 vif->bss_conf.bssid);
181 } else { 189 } else {
182 int i; 190 u8 hlid;
183 struct wl1271_link *lnk; 191 struct wl1271_link *lnk;
184 for (i = WL1271_AP_STA_HLID_START; i < AP_MAX_LINKS; i++) { 192 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
185 lnk = &wl->links[i]; 193 WL12XX_MAX_LINKS) {
186 if (!wl1271_is_active_sta(wl, i) || !lnk->ba_bitmap) 194 lnk = &wl->links[hlid];
195 if (!lnk->ba_bitmap)
187 continue; 196 continue;
188 197
189 ieee80211_stop_rx_ba_session(wl->vif, 198 ieee80211_stop_rx_ba_session(vif,
190 lnk->ba_bitmap, 199 lnk->ba_bitmap,
191 lnk->addr); 200 lnk->addr);
192 } 201 }
@@ -196,14 +205,23 @@ static void wl1271_stop_ba_event(struct wl1271 *wl)
196static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl, 205static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
197 u8 enable) 206 u8 enable)
198{ 207{
208 struct ieee80211_vif *vif;
209 struct wl12xx_vif *wlvif;
210
199 if (enable) { 211 if (enable) {
200 /* disable dynamic PS when requested by the firmware */ 212 /* disable dynamic PS when requested by the firmware */
201 ieee80211_disable_dyn_ps(wl->vif); 213 wl12xx_for_each_wlvif_sta(wl, wlvif) {
214 vif = wl12xx_wlvif_to_vif(wlvif);
215 ieee80211_disable_dyn_ps(vif);
216 }
202 set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags); 217 set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
203 } else { 218 } else {
204 ieee80211_enable_dyn_ps(wl->vif);
205 clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags); 219 clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
206 wl1271_recalc_rx_streaming(wl); 220 wl12xx_for_each_wlvif_sta(wl, wlvif) {
221 vif = wl12xx_wlvif_to_vif(wlvif);
222 ieee80211_enable_dyn_ps(vif);
223 wl1271_recalc_rx_streaming(wl, wlvif);
224 }
207 } 225 }
208 226
209} 227}
@@ -217,10 +235,11 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
217 235
218static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) 236static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
219{ 237{
238 struct ieee80211_vif *vif;
239 struct wl12xx_vif *wlvif;
220 int ret; 240 int ret;
221 u32 vector; 241 u32 vector;
222 bool beacon_loss = false; 242 bool beacon_loss = false;
223 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
224 bool disconnect_sta = false; 243 bool disconnect_sta = false;
225 unsigned long sta_bitmap = 0; 244 unsigned long sta_bitmap = 0;
226 245
@@ -234,7 +253,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
234 wl1271_debug(DEBUG_EVENT, "status: 0x%x", 253 wl1271_debug(DEBUG_EVENT, "status: 0x%x",
235 mbox->scheduled_scan_status); 254 mbox->scheduled_scan_status);
236 255
237 wl1271_scan_stm(wl); 256 wl1271_scan_stm(wl, wl->scan_vif);
238 } 257 }
239 258
240 if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) { 259 if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
@@ -253,8 +272,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
253 } 272 }
254 } 273 }
255 274
256 if (vector & SOFT_GEMINI_SENSE_EVENT_ID && 275 if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
257 wl->bss_type == BSS_TYPE_STA_BSS)
258 wl12xx_event_soft_gemini_sense(wl, 276 wl12xx_event_soft_gemini_sense(wl,
259 mbox->soft_gemini_sense_info); 277 mbox->soft_gemini_sense_info);
260 278
@@ -267,40 +285,54 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
267 * BSS_LOSE_EVENT, beacon loss has to be reported to the stack. 285 * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
268 * 286 *
269 */ 287 */
270 if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) { 288 if (vector & BSS_LOSE_EVENT_ID) {
289 /* TODO: check for multi-role */
271 wl1271_info("Beacon loss detected."); 290 wl1271_info("Beacon loss detected.");
272 291
273 /* indicate to the stack, that beacons have been lost */ 292 /* indicate to the stack, that beacons have been lost */
274 beacon_loss = true; 293 beacon_loss = true;
275 } 294 }
276 295
277 if ((vector & PS_REPORT_EVENT_ID) && !is_ap) { 296 if (vector & PS_REPORT_EVENT_ID) {
278 wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT"); 297 wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
279 ret = wl1271_event_ps_report(wl, mbox, &beacon_loss); 298 wl12xx_for_each_wlvif_sta(wl, wlvif) {
280 if (ret < 0) 299 ret = wl1271_event_ps_report(wl, wlvif,
281 return ret; 300 mbox, &beacon_loss);
301 if (ret < 0)
302 return ret;
303 }
282 } 304 }
283 305
284 if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap) 306 if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
285 wl1271_event_pspoll_delivery_fail(wl); 307 wl12xx_for_each_wlvif_sta(wl, wlvif) {
308 wl1271_event_pspoll_delivery_fail(wl, wlvif);
309 }
286 310
287 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) { 311 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
312 /* TODO: check actual multi-role support */
288 wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT"); 313 wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
289 if (wl->vif) 314 wl12xx_for_each_wlvif_sta(wl, wlvif) {
290 wl1271_event_rssi_trigger(wl, mbox); 315 wl1271_event_rssi_trigger(wl, wlvif, mbox);
316 }
291 } 317 }
292 318
293 if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)) { 319 if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) {
320 u8 role_id = mbox->role_id;
294 wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. " 321 wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
295 "ba_allowed = 0x%x", mbox->rx_ba_allowed); 322 "ba_allowed = 0x%x, role_id=%d",
323 mbox->rx_ba_allowed, role_id);
296 324
297 wl->ba_allowed = !!mbox->rx_ba_allowed; 325 wl12xx_for_each_wlvif(wl, wlvif) {
326 if (role_id != 0xff && role_id != wlvif->role_id)
327 continue;
298 328
299 if (wl->vif && !wl->ba_allowed) 329 wlvif->ba_allowed = !!mbox->rx_ba_allowed;
300 wl1271_stop_ba_event(wl); 330 if (!wlvif->ba_allowed)
331 wl1271_stop_ba_event(wl, wlvif);
332 }
301 } 333 }
302 334
303 if ((vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) && !is_ap) { 335 if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) {
304 wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. " 336 wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
305 "status = 0x%x", 337 "status = 0x%x",
306 mbox->channel_switch_status); 338 mbox->channel_switch_status);
@@ -309,50 +341,65 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
309 * 1) channel switch complete with status=0 341 * 1) channel switch complete with status=0
310 * 2) channel switch failed status=1 342 * 2) channel switch failed status=1
311 */ 343 */
312 if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags) && 344
313 (wl->vif)) 345 /* TODO: configure only the relevant vif */
314 ieee80211_chswitch_done(wl->vif, 346 wl12xx_for_each_wlvif_sta(wl, wlvif) {
315 mbox->channel_switch_status ? false : true); 347 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
348 bool success;
349
350 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
351 &wl->flags))
352 continue;
353
354 success = mbox->channel_switch_status ? false : true;
355 ieee80211_chswitch_done(vif, success);
356 }
316 } 357 }
317 358
318 if ((vector & DUMMY_PACKET_EVENT_ID)) { 359 if ((vector & DUMMY_PACKET_EVENT_ID)) {
319 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); 360 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
320 if (wl->vif) 361 wl1271_tx_dummy_packet(wl);
321 wl1271_tx_dummy_packet(wl);
322 } 362 }
323 363
324 /* 364 /*
325 * "TX retries exceeded" has a different meaning according to mode. 365 * "TX retries exceeded" has a different meaning according to mode.
326 * In AP mode the offending station is disconnected. 366 * In AP mode the offending station is disconnected.
327 */ 367 */
328 if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) { 368 if (vector & MAX_TX_RETRY_EVENT_ID) {
329 wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID"); 369 wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
330 sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded); 370 sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
331 disconnect_sta = true; 371 disconnect_sta = true;
332 } 372 }
333 373
334 if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) { 374 if (vector & INACTIVE_STA_EVENT_ID) {
335 wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID"); 375 wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
336 sta_bitmap |= le16_to_cpu(mbox->sta_aging_status); 376 sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
337 disconnect_sta = true; 377 disconnect_sta = true;
338 } 378 }
339 379
340 if (is_ap && disconnect_sta) { 380 if (disconnect_sta) {
341 u32 num_packets = wl->conf.tx.max_tx_retries; 381 u32 num_packets = wl->conf.tx.max_tx_retries;
342 struct ieee80211_sta *sta; 382 struct ieee80211_sta *sta;
343 const u8 *addr; 383 const u8 *addr;
344 int h; 384 int h;
345 385
346 for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS); 386 for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
347 h < AP_MAX_LINKS; 387 bool found = false;
348 h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) { 388 /* find the ap vif connected to this sta */
349 if (!wl1271_is_active_sta(wl, h)) 389 wl12xx_for_each_wlvif_ap(wl, wlvif) {
390 if (!test_bit(h, wlvif->ap.sta_hlid_map))
391 continue;
392 found = true;
393 break;
394 }
395 if (!found)
350 continue; 396 continue;
351 397
398 vif = wl12xx_wlvif_to_vif(wlvif);
352 addr = wl->links[h].addr; 399 addr = wl->links[h].addr;
353 400
354 rcu_read_lock(); 401 rcu_read_lock();
355 sta = ieee80211_find_sta(wl->vif, addr); 402 sta = ieee80211_find_sta(vif, addr);
356 if (sta) { 403 if (sta) {
357 wl1271_debug(DEBUG_EVENT, "remove sta %d", h); 404 wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
358 ieee80211_report_low_ack(sta, num_packets); 405 ieee80211_report_low_ack(sta, num_packets);
@@ -361,8 +408,11 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
361 } 408 }
362 } 409 }
363 410
364 if (wl->vif && beacon_loss) 411 if (beacon_loss)
365 ieee80211_connection_loss(wl->vif); 412 wl12xx_for_each_wlvif_sta(wl, wlvif) {
413 vif = wl12xx_wlvif_to_vif(wlvif);
414 ieee80211_connection_loss(vif);
415 }
366 416
367 return 0; 417 return 0;
368} 418}
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 49c1a0ede5b1..1d878ba47bf4 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -132,7 +132,4 @@ void wl1271_event_mbox_config(struct wl1271 *wl);
132int wl1271_event_handle(struct wl1271 *wl, u8 mbox); 132int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
133void wl1271_pspoll_work(struct work_struct *work); 133void wl1271_pspoll_work(struct work_struct *work);
134 134
135/* Functions from main.c */
136bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid);
137
138#endif 135#endif
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 04db64c94e9a..ca7ee59e4505 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27 27
28#include "debug.h"
28#include "init.h" 29#include "init.h"
29#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
30#include "acx.h" 31#include "acx.h"
@@ -33,7 +34,7 @@
33#include "tx.h" 34#include "tx.h"
34#include "io.h" 35#include "io.h"
35 36
36int wl1271_sta_init_templates_config(struct wl1271 *wl) 37int wl1271_init_templates_config(struct wl1271 *wl)
37{ 38{
38 int ret, i; 39 int ret, i;
39 40
@@ -64,7 +65,7 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
64 65
65 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL, 66 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
66 sizeof 67 sizeof
67 (struct wl12xx_qos_null_data_template), 68 (struct ieee80211_qos_hdr),
68 0, WL1271_RATE_AUTOMATIC); 69 0, WL1271_RATE_AUTOMATIC);
69 if (ret < 0) 70 if (ret < 0)
70 return ret; 71 return ret;
@@ -88,10 +89,33 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
88 if (ret < 0) 89 if (ret < 0)
89 return ret; 90 return ret;
90 91
92 /*
93 * Put very large empty placeholders for all templates. These
94 * reserve memory for later.
95 */
96 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
97 WL1271_CMD_TEMPL_MAX_SIZE,
98 0, WL1271_RATE_AUTOMATIC);
99 if (ret < 0)
100 return ret;
101
102 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
103 WL1271_CMD_TEMPL_MAX_SIZE,
104 0, WL1271_RATE_AUTOMATIC);
105 if (ret < 0)
106 return ret;
107
108 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
109 sizeof
110 (struct wl12xx_disconn_template),
111 0, WL1271_RATE_AUTOMATIC);
112 if (ret < 0)
113 return ret;
114
91 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { 115 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
92 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL, 116 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
93 WL1271_CMD_TEMPL_DFLT_SIZE, i, 117 sizeof(struct ieee80211_qos_hdr),
94 WL1271_RATE_AUTOMATIC); 118 i, WL1271_RATE_AUTOMATIC);
95 if (ret < 0) 119 if (ret < 0)
96 return ret; 120 return ret;
97 } 121 }
@@ -99,7 +123,8 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
99 return 0; 123 return 0;
100} 124}
101 125
102static int wl1271_ap_init_deauth_template(struct wl1271 *wl) 126static int wl1271_ap_init_deauth_template(struct wl1271 *wl,
127 struct wl12xx_vif *wlvif)
103{ 128{
104 struct wl12xx_disconn_template *tmpl; 129 struct wl12xx_disconn_template *tmpl;
105 int ret; 130 int ret;
@@ -114,7 +139,7 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
114 tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | 139 tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
115 IEEE80211_STYPE_DEAUTH); 140 IEEE80211_STYPE_DEAUTH);
116 141
117 rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); 142 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
118 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, 143 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
119 tmpl, sizeof(*tmpl), 0, rate); 144 tmpl, sizeof(*tmpl), 0, rate);
120 145
@@ -123,8 +148,10 @@ out:
123 return ret; 148 return ret;
124} 149}
125 150
126static int wl1271_ap_init_null_template(struct wl1271 *wl) 151static int wl1271_ap_init_null_template(struct wl1271 *wl,
152 struct ieee80211_vif *vif)
127{ 153{
154 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
128 struct ieee80211_hdr_3addr *nullfunc; 155 struct ieee80211_hdr_3addr *nullfunc;
129 int ret; 156 int ret;
130 u32 rate; 157 u32 rate;
@@ -141,10 +168,10 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl)
141 168
142 /* nullfunc->addr1 is filled by FW */ 169 /* nullfunc->addr1 is filled by FW */
143 170
144 memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN); 171 memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
145 memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN); 172 memcpy(nullfunc->addr3, vif->addr, ETH_ALEN);
146 173
147 rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); 174 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
148 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc, 175 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
149 sizeof(*nullfunc), 0, rate); 176 sizeof(*nullfunc), 0, rate);
150 177
@@ -153,8 +180,10 @@ out:
153 return ret; 180 return ret;
154} 181}
155 182
156static int wl1271_ap_init_qos_null_template(struct wl1271 *wl) 183static int wl1271_ap_init_qos_null_template(struct wl1271 *wl,
184 struct ieee80211_vif *vif)
157{ 185{
186 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
158 struct ieee80211_qos_hdr *qosnull; 187 struct ieee80211_qos_hdr *qosnull;
159 int ret; 188 int ret;
160 u32 rate; 189 u32 rate;
@@ -171,10 +200,10 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
171 200
172 /* qosnull->addr1 is filled by FW */ 201 /* qosnull->addr1 is filled by FW */
173 202
174 memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN); 203 memcpy(qosnull->addr2, vif->addr, ETH_ALEN);
175 memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN); 204 memcpy(qosnull->addr3, vif->addr, ETH_ALEN);
176 205
177 rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); 206 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
178 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull, 207 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
179 sizeof(*qosnull), 0, rate); 208 sizeof(*qosnull), 0, rate);
180 209
@@ -183,49 +212,6 @@ out:
183 return ret; 212 return ret;
184} 213}
185 214
186static int wl1271_ap_init_templates_config(struct wl1271 *wl)
187{
188 int ret;
189
190 /*
191 * Put very large empty placeholders for all templates. These
192 * reserve memory for later.
193 */
194 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
195 WL1271_CMD_TEMPL_MAX_SIZE,
196 0, WL1271_RATE_AUTOMATIC);
197 if (ret < 0)
198 return ret;
199
200 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
201 WL1271_CMD_TEMPL_MAX_SIZE,
202 0, WL1271_RATE_AUTOMATIC);
203 if (ret < 0)
204 return ret;
205
206 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
207 sizeof
208 (struct wl12xx_disconn_template),
209 0, WL1271_RATE_AUTOMATIC);
210 if (ret < 0)
211 return ret;
212
213 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
214 sizeof(struct wl12xx_null_data_template),
215 0, WL1271_RATE_AUTOMATIC);
216 if (ret < 0)
217 return ret;
218
219 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
220 sizeof
221 (struct wl12xx_qos_null_data_template),
222 0, WL1271_RATE_AUTOMATIC);
223 if (ret < 0)
224 return ret;
225
226 return 0;
227}
228
229static int wl12xx_init_rx_config(struct wl1271 *wl) 215static int wl12xx_init_rx_config(struct wl1271 *wl)
230{ 216{
231 int ret; 217 int ret;
@@ -237,39 +223,37 @@ static int wl12xx_init_rx_config(struct wl1271 *wl)
237 return 0; 223 return 0;
238} 224}
239 225
240int wl1271_init_phy_config(struct wl1271 *wl) 226static int wl12xx_init_phy_vif_config(struct wl1271 *wl,
227 struct wl12xx_vif *wlvif)
241{ 228{
242 int ret; 229 int ret;
243 230
244 ret = wl1271_acx_pd_threshold(wl); 231 ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME);
245 if (ret < 0)
246 return ret;
247
248 ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME);
249 if (ret < 0) 232 if (ret < 0)
250 return ret; 233 return ret;
251 234
252 ret = wl1271_acx_service_period_timeout(wl); 235 ret = wl1271_acx_service_period_timeout(wl, wlvif);
253 if (ret < 0) 236 if (ret < 0)
254 return ret; 237 return ret;
255 238
256 ret = wl1271_acx_rts_threshold(wl, wl->hw->wiphy->rts_threshold); 239 ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold);
257 if (ret < 0) 240 if (ret < 0)
258 return ret; 241 return ret;
259 242
260 return 0; 243 return 0;
261} 244}
262 245
263static int wl1271_init_beacon_filter(struct wl1271 *wl) 246static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
247 struct wl12xx_vif *wlvif)
264{ 248{
265 int ret; 249 int ret;
266 250
267 /* disable beacon filtering at this stage */ 251 ret = wl1271_acx_beacon_filter_table(wl, wlvif);
268 ret = wl1271_acx_beacon_filter_opt(wl, false);
269 if (ret < 0) 252 if (ret < 0)
270 return ret; 253 return ret;
271 254
272 ret = wl1271_acx_beacon_filter_table(wl); 255 /* enable beacon filtering */
256 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
273 if (ret < 0) 257 if (ret < 0)
274 return ret; 258 return ret;
275 259
@@ -302,11 +286,12 @@ int wl1271_init_energy_detection(struct wl1271 *wl)
302 return 0; 286 return 0;
303} 287}
304 288
305static int wl1271_init_beacon_broadcast(struct wl1271 *wl) 289static int wl1271_init_beacon_broadcast(struct wl1271 *wl,
290 struct wl12xx_vif *wlvif)
306{ 291{
307 int ret; 292 int ret;
308 293
309 ret = wl1271_acx_bcn_dtim_options(wl); 294 ret = wl1271_acx_bcn_dtim_options(wl, wlvif);
310 if (ret < 0) 295 if (ret < 0)
311 return ret; 296 return ret;
312 297
@@ -327,36 +312,13 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
327 return 0; 312 return 0;
328} 313}
329 314
330static int wl1271_sta_hw_init(struct wl1271 *wl) 315/* generic sta initialization (non vif-specific) */
316static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
331{ 317{
332 int ret; 318 int ret;
333 319
334 if (wl->chip.id != CHIP_ID_1283_PG20) {
335 ret = wl1271_cmd_ext_radio_parms(wl);
336 if (ret < 0)
337 return ret;
338 }
339
340 /* PS config */ 320 /* PS config */
341 ret = wl1271_acx_config_ps(wl); 321 ret = wl12xx_acx_config_ps(wl, wlvif);
342 if (ret < 0)
343 return ret;
344
345 ret = wl1271_sta_init_templates_config(wl);
346 if (ret < 0)
347 return ret;
348
349 ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
350 if (ret < 0)
351 return ret;
352
353 /* Initialize connection monitoring thresholds */
354 ret = wl1271_acx_conn_monit_params(wl, false);
355 if (ret < 0)
356 return ret;
357
358 /* Beacon filtering */
359 ret = wl1271_init_beacon_filter(wl);
360 if (ret < 0) 322 if (ret < 0)
361 return ret; 323 return ret;
362 324
@@ -365,103 +327,61 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
365 if (ret < 0) 327 if (ret < 0)
366 return ret; 328 return ret;
367 329
368 /* Beacons and broadcast settings */ 330 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
369 ret = wl1271_init_beacon_broadcast(wl);
370 if (ret < 0)
371 return ret;
372
373 /* Configure for ELP power saving */
374 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
375 if (ret < 0)
376 return ret;
377
378 /* Configure rssi/snr averaging weights */
379 ret = wl1271_acx_rssi_snr_avg_weights(wl);
380 if (ret < 0)
381 return ret;
382
383 ret = wl1271_acx_sta_rate_policies(wl);
384 if (ret < 0)
385 return ret;
386
387 ret = wl12xx_acx_mem_cfg(wl);
388 if (ret < 0)
389 return ret;
390
391 /* Configure the FW logger */
392 ret = wl12xx_init_fwlog(wl);
393 if (ret < 0) 331 if (ret < 0)
394 return ret; 332 return ret;
395 333
396 return 0; 334 return 0;
397} 335}
398 336
399static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl) 337static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
338 struct ieee80211_vif *vif)
400{ 339{
340 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
401 int ret, i; 341 int ret, i;
402 342
403 /* disable all keep-alive templates */ 343 /* disable all keep-alive templates */
404 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { 344 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
405 ret = wl1271_acx_keep_alive_config(wl, i, 345 ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
406 ACX_KEEP_ALIVE_TPL_INVALID); 346 ACX_KEEP_ALIVE_TPL_INVALID);
407 if (ret < 0) 347 if (ret < 0)
408 return ret; 348 return ret;
409 } 349 }
410 350
411 /* disable the keep-alive feature */ 351 /* disable the keep-alive feature */
412 ret = wl1271_acx_keep_alive_mode(wl, false); 352 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
413 if (ret < 0) 353 if (ret < 0)
414 return ret; 354 return ret;
415 355
416 return 0; 356 return 0;
417} 357}
418 358
419static int wl1271_ap_hw_init(struct wl1271 *wl) 359/* generic ap initialization (non vif-specific) */
360static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
420{ 361{
421 int ret; 362 int ret;
422 363
423 ret = wl1271_ap_init_templates_config(wl); 364 ret = wl1271_init_ap_rates(wl, wlvif);
424 if (ret < 0)
425 return ret;
426
427 /* Configure for power always on */
428 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
429 if (ret < 0)
430 return ret;
431
432 ret = wl1271_init_ap_rates(wl);
433 if (ret < 0)
434 return ret;
435
436 ret = wl1271_acx_ap_max_tx_retry(wl);
437 if (ret < 0)
438 return ret;
439
440 ret = wl12xx_acx_mem_cfg(wl);
441 if (ret < 0)
442 return ret;
443
444 /* initialize Tx power */
445 ret = wl1271_acx_tx_power(wl, wl->power_level);
446 if (ret < 0) 365 if (ret < 0)
447 return ret; 366 return ret;
448 367
449 return 0; 368 return 0;
450} 369}
451 370
452int wl1271_ap_init_templates(struct wl1271 *wl) 371int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif)
453{ 372{
373 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
454 int ret; 374 int ret;
455 375
456 ret = wl1271_ap_init_deauth_template(wl); 376 ret = wl1271_ap_init_deauth_template(wl, wlvif);
457 if (ret < 0) 377 if (ret < 0)
458 return ret; 378 return ret;
459 379
460 ret = wl1271_ap_init_null_template(wl); 380 ret = wl1271_ap_init_null_template(wl, vif);
461 if (ret < 0) 381 if (ret < 0)
462 return ret; 382 return ret;
463 383
464 ret = wl1271_ap_init_qos_null_template(wl); 384 ret = wl1271_ap_init_qos_null_template(wl, vif);
465 if (ret < 0) 385 if (ret < 0)
466 return ret; 386 return ret;
467 387
@@ -469,43 +389,45 @@ int wl1271_ap_init_templates(struct wl1271 *wl)
469 * when operating as AP we want to receive external beacons for 389 * when operating as AP we want to receive external beacons for
470 * configuring ERP protection. 390 * configuring ERP protection.
471 */ 391 */
472 ret = wl1271_acx_beacon_filter_opt(wl, false); 392 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
473 if (ret < 0) 393 if (ret < 0)
474 return ret; 394 return ret;
475 395
476 return 0; 396 return 0;
477} 397}
478 398
479static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl) 399static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl,
400 struct ieee80211_vif *vif)
480{ 401{
481 return wl1271_ap_init_templates(wl); 402 return wl1271_ap_init_templates(wl, vif);
482} 403}
483 404
484int wl1271_init_ap_rates(struct wl1271 *wl) 405int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
485{ 406{
486 int i, ret; 407 int i, ret;
487 struct conf_tx_rate_class rc; 408 struct conf_tx_rate_class rc;
488 u32 supported_rates; 409 u32 supported_rates;
489 410
490 wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", wl->basic_rate_set); 411 wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x",
412 wlvif->basic_rate_set);
491 413
492 if (wl->basic_rate_set == 0) 414 if (wlvif->basic_rate_set == 0)
493 return -EINVAL; 415 return -EINVAL;
494 416
495 rc.enabled_rates = wl->basic_rate_set; 417 rc.enabled_rates = wlvif->basic_rate_set;
496 rc.long_retry_limit = 10; 418 rc.long_retry_limit = 10;
497 rc.short_retry_limit = 10; 419 rc.short_retry_limit = 10;
498 rc.aflags = 0; 420 rc.aflags = 0;
499 ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_MGMT_RATE); 421 ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx);
500 if (ret < 0) 422 if (ret < 0)
501 return ret; 423 return ret;
502 424
503 /* use the min basic rate for AP broadcast/multicast */ 425 /* use the min basic rate for AP broadcast/multicast */
504 rc.enabled_rates = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); 426 rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
505 rc.short_retry_limit = 10; 427 rc.short_retry_limit = 10;
506 rc.long_retry_limit = 10; 428 rc.long_retry_limit = 10;
507 rc.aflags = 0; 429 rc.aflags = 0;
508 ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_BCST_RATE); 430 ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx);
509 if (ret < 0) 431 if (ret < 0)
510 return ret; 432 return ret;
511 433
@@ -513,7 +435,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
513 * If the basic rates contain OFDM rates, use OFDM only 435 * If the basic rates contain OFDM rates, use OFDM only
514 * rates for unicast TX as well. Else use all supported rates. 436 * rates for unicast TX as well. Else use all supported rates.
515 */ 437 */
516 if ((wl->basic_rate_set & CONF_TX_OFDM_RATES)) 438 if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
517 supported_rates = CONF_TX_OFDM_RATES; 439 supported_rates = CONF_TX_OFDM_RATES;
518 else 440 else
519 supported_rates = CONF_TX_AP_ENABLED_RATES; 441 supported_rates = CONF_TX_AP_ENABLED_RATES;
@@ -527,7 +449,8 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
527 rc.short_retry_limit = 10; 449 rc.short_retry_limit = 10;
528 rc.long_retry_limit = 10; 450 rc.long_retry_limit = 10;
529 rc.aflags = 0; 451 rc.aflags = 0;
530 ret = wl1271_acx_ap_rate_policy(wl, &rc, i); 452 ret = wl1271_acx_ap_rate_policy(wl, &rc,
453 wlvif->ap.ucast_rate_idx[i]);
531 if (ret < 0) 454 if (ret < 0)
532 return ret; 455 return ret;
533 } 456 }
@@ -535,24 +458,23 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
535 return 0; 458 return 0;
536} 459}
537 460
538static int wl1271_set_ba_policies(struct wl1271 *wl) 461static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
539{ 462{
540 /* Reset the BA RX indicators */ 463 /* Reset the BA RX indicators */
541 wl->ba_rx_bitmap = 0; 464 wlvif->ba_allowed = true;
542 wl->ba_allowed = true;
543 wl->ba_rx_session_count = 0; 465 wl->ba_rx_session_count = 0;
544 466
545 /* BA is supported in STA/AP modes */ 467 /* BA is supported in STA/AP modes */
546 if (wl->bss_type != BSS_TYPE_AP_BSS && 468 if (wlvif->bss_type != BSS_TYPE_AP_BSS &&
547 wl->bss_type != BSS_TYPE_STA_BSS) { 469 wlvif->bss_type != BSS_TYPE_STA_BSS) {
548 wl->ba_support = false; 470 wlvif->ba_support = false;
549 return 0; 471 return 0;
550 } 472 }
551 473
552 wl->ba_support = true; 474 wlvif->ba_support = true;
553 475
554 /* 802.11n initiator BA session setting */ 476 /* 802.11n initiator BA session setting */
555 return wl12xx_acx_set_ba_initiator_policy(wl); 477 return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
556} 478}
557 479
558int wl1271_chip_specific_init(struct wl1271 *wl) 480int wl1271_chip_specific_init(struct wl1271 *wl)
@@ -562,7 +484,7 @@ int wl1271_chip_specific_init(struct wl1271 *wl)
562 if (wl->chip.id == CHIP_ID_1283_PG20) { 484 if (wl->chip.id == CHIP_ID_1283_PG20) {
563 u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE; 485 u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
564 486
565 if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT) 487 if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
566 /* Enable SDIO padding */ 488 /* Enable SDIO padding */
567 host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK; 489 host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
568 490
@@ -575,39 +497,186 @@ out:
575 return ret; 497 return ret;
576} 498}
577 499
500/* vif-specifc initialization */
501static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
502{
503 int ret;
578 504
579int wl1271_hw_init(struct wl1271 *wl) 505 ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0);
506 if (ret < 0)
507 return ret;
508
509 /* Initialize connection monitoring thresholds */
510 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
511 if (ret < 0)
512 return ret;
513
514 /* Beacon filtering */
515 ret = wl1271_init_sta_beacon_filter(wl, wlvif);
516 if (ret < 0)
517 return ret;
518
519 /* Beacons and broadcast settings */
520 ret = wl1271_init_beacon_broadcast(wl, wlvif);
521 if (ret < 0)
522 return ret;
523
524 /* Configure rssi/snr averaging weights */
525 ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif);
526 if (ret < 0)
527 return ret;
528
529 return 0;
530}
531
532/* vif-specific intialization */
533static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
580{ 534{
535 int ret;
536
537 ret = wl1271_acx_ap_max_tx_retry(wl, wlvif);
538 if (ret < 0)
539 return ret;
540
541 /* initialize Tx power */
542 ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level);
543 if (ret < 0)
544 return ret;
545
546 return 0;
547}
548
549int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
550{
551 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
581 struct conf_tx_ac_category *conf_ac; 552 struct conf_tx_ac_category *conf_ac;
582 struct conf_tx_tid *conf_tid; 553 struct conf_tx_tid *conf_tid;
554 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
583 int ret, i; 555 int ret, i;
584 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
585 556
586 if (wl->chip.id == CHIP_ID_1283_PG20) 557 /*
587 ret = wl128x_cmd_general_parms(wl); 558 * consider all existing roles before configuring psm.
588 else 559 * TODO: reconfigure on interface removal.
589 ret = wl1271_cmd_general_parms(wl); 560 */
561 if (!wl->ap_count) {
562 if (is_ap) {
563 /* Configure for power always on */
564 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
565 if (ret < 0)
566 return ret;
567 } else if (!wl->sta_count) {
568 /* Configure for ELP power saving */
569 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
570 if (ret < 0)
571 return ret;
572 }
573 }
574
575 /* Mode specific init */
576 if (is_ap) {
577 ret = wl1271_ap_hw_init(wl, wlvif);
578 if (ret < 0)
579 return ret;
580
581 ret = wl12xx_init_ap_role(wl, wlvif);
582 if (ret < 0)
583 return ret;
584 } else {
585 ret = wl1271_sta_hw_init(wl, wlvif);
586 if (ret < 0)
587 return ret;
588
589 ret = wl12xx_init_sta_role(wl, wlvif);
590 if (ret < 0)
591 return ret;
592 }
593
594 wl12xx_init_phy_vif_config(wl, wlvif);
595
596 /* Default TID/AC configuration */
597 BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
598 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
599 conf_ac = &wl->conf.tx.ac_conf[i];
600 ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac,
601 conf_ac->cw_min, conf_ac->cw_max,
602 conf_ac->aifsn, conf_ac->tx_op_limit);
603 if (ret < 0)
604 return ret;
605
606 conf_tid = &wl->conf.tx.tid_conf[i];
607 ret = wl1271_acx_tid_cfg(wl, wlvif,
608 conf_tid->queue_id,
609 conf_tid->channel_type,
610 conf_tid->tsid,
611 conf_tid->ps_scheme,
612 conf_tid->ack_policy,
613 conf_tid->apsd_conf[0],
614 conf_tid->apsd_conf[1]);
615 if (ret < 0)
616 return ret;
617 }
618
619 /* Configure HW encryption */
620 ret = wl1271_acx_feature_cfg(wl, wlvif);
590 if (ret < 0) 621 if (ret < 0)
591 return ret; 622 return ret;
592 623
593 if (wl->chip.id == CHIP_ID_1283_PG20) 624 /* Mode specific init - post mem init */
594 ret = wl128x_cmd_radio_parms(wl); 625 if (is_ap)
626 ret = wl1271_ap_hw_init_post_mem(wl, vif);
595 else 627 else
596 ret = wl1271_cmd_radio_parms(wl); 628 ret = wl1271_sta_hw_init_post_mem(wl, vif);
629
630 if (ret < 0)
631 return ret;
632
633 /* Configure initiator BA sessions policies */
634 ret = wl1271_set_ba_policies(wl, wlvif);
597 if (ret < 0) 635 if (ret < 0)
598 return ret; 636 return ret;
599 637
638 return 0;
639}
640
641int wl1271_hw_init(struct wl1271 *wl)
642{
643 int ret;
644
645 if (wl->chip.id == CHIP_ID_1283_PG20) {
646 ret = wl128x_cmd_general_parms(wl);
647 if (ret < 0)
648 return ret;
649 ret = wl128x_cmd_radio_parms(wl);
650 if (ret < 0)
651 return ret;
652 } else {
653 ret = wl1271_cmd_general_parms(wl);
654 if (ret < 0)
655 return ret;
656 ret = wl1271_cmd_radio_parms(wl);
657 if (ret < 0)
658 return ret;
659 ret = wl1271_cmd_ext_radio_parms(wl);
660 if (ret < 0)
661 return ret;
662 }
663
600 /* Chip-specific init */ 664 /* Chip-specific init */
601 ret = wl1271_chip_specific_init(wl); 665 ret = wl1271_chip_specific_init(wl);
602 if (ret < 0) 666 if (ret < 0)
603 return ret; 667 return ret;
604 668
605 /* Mode specific init */ 669 /* Init templates */
606 if (is_ap) 670 ret = wl1271_init_templates_config(wl);
607 ret = wl1271_ap_hw_init(wl); 671 if (ret < 0)
608 else 672 return ret;
609 ret = wl1271_sta_hw_init(wl); 673
674 ret = wl12xx_acx_mem_cfg(wl);
675 if (ret < 0)
676 return ret;
610 677
678 /* Configure the FW logger */
679 ret = wl12xx_init_fwlog(wl);
611 if (ret < 0) 680 if (ret < 0)
612 return ret; 681 return ret;
613 682
@@ -626,11 +695,6 @@ int wl1271_hw_init(struct wl1271 *wl)
626 if (ret < 0) 695 if (ret < 0)
627 goto out_free_memmap; 696 goto out_free_memmap;
628 697
629 /* PHY layer config */
630 ret = wl1271_init_phy_config(wl);
631 if (ret < 0)
632 goto out_free_memmap;
633
634 ret = wl1271_acx_dco_itrim_params(wl); 698 ret = wl1271_acx_dco_itrim_params(wl);
635 if (ret < 0) 699 if (ret < 0)
636 goto out_free_memmap; 700 goto out_free_memmap;
@@ -655,61 +719,20 @@ int wl1271_hw_init(struct wl1271 *wl)
655 if (ret < 0) 719 if (ret < 0)
656 goto out_free_memmap; 720 goto out_free_memmap;
657 721
658 /* Default TID/AC configuration */
659 BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
660 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
661 conf_ac = &wl->conf.tx.ac_conf[i];
662 ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
663 conf_ac->cw_max, conf_ac->aifsn,
664 conf_ac->tx_op_limit);
665 if (ret < 0)
666 goto out_free_memmap;
667
668 conf_tid = &wl->conf.tx.tid_conf[i];
669 ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
670 conf_tid->channel_type,
671 conf_tid->tsid,
672 conf_tid->ps_scheme,
673 conf_tid->ack_policy,
674 conf_tid->apsd_conf[0],
675 conf_tid->apsd_conf[1]);
676 if (ret < 0)
677 goto out_free_memmap;
678 }
679
680 /* Enable data path */ 722 /* Enable data path */
681 ret = wl1271_cmd_data_path(wl, 1); 723 ret = wl1271_cmd_data_path(wl, 1);
682 if (ret < 0) 724 if (ret < 0)
683 goto out_free_memmap; 725 goto out_free_memmap;
684 726
685 /* Configure HW encryption */
686 ret = wl1271_acx_feature_cfg(wl);
687 if (ret < 0)
688 goto out_free_memmap;
689
690 /* configure PM */ 727 /* configure PM */
691 ret = wl1271_acx_pm_config(wl); 728 ret = wl1271_acx_pm_config(wl);
692 if (ret < 0) 729 if (ret < 0)
693 goto out_free_memmap; 730 goto out_free_memmap;
694 731
695 /* Mode specific init - post mem init */
696 if (is_ap)
697 ret = wl1271_ap_hw_init_post_mem(wl);
698 else
699 ret = wl1271_sta_hw_init_post_mem(wl);
700
701 if (ret < 0)
702 goto out_free_memmap;
703
704 ret = wl12xx_acx_set_rate_mgmt_params(wl); 732 ret = wl12xx_acx_set_rate_mgmt_params(wl);
705 if (ret < 0) 733 if (ret < 0)
706 goto out_free_memmap; 734 goto out_free_memmap;
707 735
708 /* Configure initiator BA sessions policies */
709 ret = wl1271_set_ba_policies(wl);
710 if (ret < 0)
711 goto out_free_memmap;
712
713 /* configure hangover */ 736 /* configure hangover */
714 ret = wl12xx_acx_config_hangover(wl); 737 ret = wl12xx_acx_config_hangover(wl);
715 if (ret < 0) 738 if (ret < 0)
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 3a3c230fd292..2da0f404ef6e 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -27,13 +27,13 @@
27#include "wl12xx.h" 27#include "wl12xx.h"
28 28
29int wl1271_hw_init_power_auth(struct wl1271 *wl); 29int wl1271_hw_init_power_auth(struct wl1271 *wl);
30int wl1271_sta_init_templates_config(struct wl1271 *wl); 30int wl1271_init_templates_config(struct wl1271 *wl);
31int wl1271_init_phy_config(struct wl1271 *wl);
32int wl1271_init_pta(struct wl1271 *wl); 31int wl1271_init_pta(struct wl1271 *wl);
33int wl1271_init_energy_detection(struct wl1271 *wl); 32int wl1271_init_energy_detection(struct wl1271 *wl);
34int wl1271_chip_specific_init(struct wl1271 *wl); 33int wl1271_chip_specific_init(struct wl1271 *wl);
35int wl1271_hw_init(struct wl1271 *wl); 34int wl1271_hw_init(struct wl1271 *wl);
36int wl1271_init_ap_rates(struct wl1271 *wl); 35int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
37int wl1271_ap_init_templates(struct wl1271 *wl); 36int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
37int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
38 38
39#endif 39#endif
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/wl12xx/io.c
index c2da66f45046..079ad380e8ff 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -24,8 +24,10 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/interrupt.h>
27 28
28#include "wl12xx.h" 29#include "wl12xx.h"
30#include "debug.h"
29#include "wl12xx_80211.h" 31#include "wl12xx_80211.h"
30#include "io.h" 32#include "io.h"
31#include "tx.h" 33#include "tx.h"
@@ -46,7 +48,7 @@
46bool wl1271_set_block_size(struct wl1271 *wl) 48bool wl1271_set_block_size(struct wl1271 *wl)
47{ 49{
48 if (wl->if_ops->set_block_size) { 50 if (wl->if_ops->set_block_size) {
49 wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE); 51 wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE);
50 return true; 52 return true;
51 } 53 }
52 54
@@ -55,12 +57,12 @@ bool wl1271_set_block_size(struct wl1271 *wl)
55 57
56void wl1271_disable_interrupts(struct wl1271 *wl) 58void wl1271_disable_interrupts(struct wl1271 *wl)
57{ 59{
58 wl->if_ops->disable_irq(wl); 60 disable_irq(wl->irq);
59} 61}
60 62
61void wl1271_enable_interrupts(struct wl1271 *wl) 63void wl1271_enable_interrupts(struct wl1271 *wl)
62{ 64{
63 wl->if_ops->enable_irq(wl); 65 enable_irq(wl->irq);
64} 66}
65 67
66/* Set the SPI partitions to access the chip addresses 68/* Set the SPI partitions to access the chip addresses
@@ -128,13 +130,13 @@ EXPORT_SYMBOL_GPL(wl1271_set_partition);
128void wl1271_io_reset(struct wl1271 *wl) 130void wl1271_io_reset(struct wl1271 *wl)
129{ 131{
130 if (wl->if_ops->reset) 132 if (wl->if_ops->reset)
131 wl->if_ops->reset(wl); 133 wl->if_ops->reset(wl->dev);
132} 134}
133 135
134void wl1271_io_init(struct wl1271 *wl) 136void wl1271_io_init(struct wl1271 *wl)
135{ 137{
136 if (wl->if_ops->init) 138 if (wl->if_ops->init)
137 wl->if_ops->init(wl); 139 wl->if_ops->init(wl->dev);
138} 140}
139 141
140void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val) 142void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index e839341dfafe..d398cbcea986 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -51,23 +51,17 @@ void wl1271_enable_interrupts(struct wl1271 *wl);
51void wl1271_io_reset(struct wl1271 *wl); 51void wl1271_io_reset(struct wl1271 *wl);
52void wl1271_io_init(struct wl1271 *wl); 52void wl1271_io_init(struct wl1271 *wl);
53 53
54static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
55{
56 return wl->if_ops->dev(wl);
57}
58
59
60/* Raw target IO, address is not translated */ 54/* Raw target IO, address is not translated */
61static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, 55static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
62 size_t len, bool fixed) 56 size_t len, bool fixed)
63{ 57{
64 wl->if_ops->write(wl, addr, buf, len, fixed); 58 wl->if_ops->write(wl->dev, addr, buf, len, fixed);
65} 59}
66 60
67static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf, 61static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
68 size_t len, bool fixed) 62 size_t len, bool fixed)
69{ 63{
70 wl->if_ops->read(wl, addr, buf, len, fixed); 64 wl->if_ops->read(wl->dev, addr, buf, len, fixed);
71} 65}
72 66
73static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr) 67static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
@@ -155,13 +149,13 @@ static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
155 149
156static inline void wl1271_power_off(struct wl1271 *wl) 150static inline void wl1271_power_off(struct wl1271 *wl)
157{ 151{
158 wl->if_ops->power(wl, false); 152 wl->if_ops->power(wl->dev, false);
159 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); 153 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
160} 154}
161 155
162static inline int wl1271_power_on(struct wl1271 *wl) 156static inline int wl1271_power_on(struct wl1271 *wl)
163{ 157{
164 int ret = wl->if_ops->power(wl, true); 158 int ret = wl->if_ops->power(wl->dev, true);
165 if (ret == 0) 159 if (ret == 0)
166 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); 160 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
167 161
@@ -176,15 +170,10 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
176int wl1271_set_partition(struct wl1271 *wl, 170int wl1271_set_partition(struct wl1271 *wl,
177 struct wl1271_partition_set *p); 171 struct wl1271_partition_set *p);
178 172
173bool wl1271_set_block_size(struct wl1271 *wl);
174
179/* Functions from wl1271_main.c */ 175/* Functions from wl1271_main.c */
180 176
181int wl1271_register_hw(struct wl1271 *wl);
182void wl1271_unregister_hw(struct wl1271 *wl);
183int wl1271_init_ieee80211(struct wl1271 *wl);
184struct ieee80211_hw *wl1271_alloc_hw(void);
185int wl1271_free_hw(struct wl1271 *wl);
186irqreturn_t wl1271_irq(int irq, void *data);
187bool wl1271_set_block_size(struct wl1271 *wl);
188int wl1271_tx_dummy_packet(struct wl1271 *wl); 177int wl1271_tx_dummy_packet(struct wl1271 *wl);
189 178
190#endif 179#endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 884f82b63219..c3058419e227 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -32,8 +32,10 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/wl12xx.h> 33#include <linux/wl12xx.h>
34#include <linux/sched.h> 34#include <linux/sched.h>
35#include <linux/interrupt.h>
35 36
36#include "wl12xx.h" 37#include "wl12xx.h"
38#include "debug.h"
37#include "wl12xx_80211.h" 39#include "wl12xx_80211.h"
38#include "reg.h" 40#include "reg.h"
39#include "io.h" 41#include "io.h"
@@ -377,42 +379,30 @@ static char *fwlog_param;
377static bool bug_on_recovery; 379static bool bug_on_recovery;
378 380
379static void __wl1271_op_remove_interface(struct wl1271 *wl, 381static void __wl1271_op_remove_interface(struct wl1271 *wl,
382 struct ieee80211_vif *vif,
380 bool reset_tx_queues); 383 bool reset_tx_queues);
381static void wl1271_free_ap_keys(struct wl1271 *wl); 384static void wl1271_op_stop(struct ieee80211_hw *hw);
382 385static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
383
384static void wl1271_device_release(struct device *dev)
385{
386
387}
388
389static struct platform_device wl1271_device = {
390 .name = "wl1271",
391 .id = -1,
392
393 /* device model insists to have a release function */
394 .dev = {
395 .release = wl1271_device_release,
396 },
397};
398 386
399static DEFINE_MUTEX(wl_list_mutex); 387static DEFINE_MUTEX(wl_list_mutex);
400static LIST_HEAD(wl_list); 388static LIST_HEAD(wl_list);
401 389
402static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate) 390static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
391 unsigned char operstate)
403{ 392{
404 int ret; 393 int ret;
394
405 if (operstate != IF_OPER_UP) 395 if (operstate != IF_OPER_UP)
406 return 0; 396 return 0;
407 397
408 if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags)) 398 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
409 return 0; 399 return 0;
410 400
411 ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid); 401 ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
412 if (ret < 0) 402 if (ret < 0)
413 return ret; 403 return ret;
414 404
415 wl12xx_croc(wl, wl->role_id); 405 wl12xx_croc(wl, wlvif->role_id);
416 406
417 wl1271_info("Association completed."); 407 wl1271_info("Association completed.");
418 return 0; 408 return 0;
@@ -426,6 +416,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
426 struct ieee80211_hw *hw; 416 struct ieee80211_hw *hw;
427 struct wl1271 *wl; 417 struct wl1271 *wl;
428 struct wl1271 *wl_temp; 418 struct wl1271 *wl_temp;
419 struct wl12xx_vif *wlvif;
429 int ret = 0; 420 int ret = 0;
430 421
431 /* Check that this notification is for us. */ 422 /* Check that this notification is for us. */
@@ -459,17 +450,18 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
459 if (wl->state == WL1271_STATE_OFF) 450 if (wl->state == WL1271_STATE_OFF)
460 goto out; 451 goto out;
461 452
462 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 453 wl12xx_for_each_wlvif_sta(wl, wlvif) {
463 goto out; 454 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
464 455 continue;
465 ret = wl1271_ps_elp_wakeup(wl);
466 if (ret < 0)
467 goto out;
468 456
469 wl1271_check_operstate(wl, dev->operstate); 457 ret = wl1271_ps_elp_wakeup(wl);
458 if (ret < 0)
459 goto out;
470 460
471 wl1271_ps_elp_sleep(wl); 461 wl1271_check_operstate(wl, wlvif, dev->operstate);
472 462
463 wl1271_ps_elp_sleep(wl);
464 }
473out: 465out:
474 mutex_unlock(&wl->mutex); 466 mutex_unlock(&wl->mutex);
475 467
@@ -498,19 +490,20 @@ static int wl1271_reg_notify(struct wiphy *wiphy,
498 return 0; 490 return 0;
499} 491}
500 492
501static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable) 493static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
494 bool enable)
502{ 495{
503 int ret = 0; 496 int ret = 0;
504 497
505 /* we should hold wl->mutex */ 498 /* we should hold wl->mutex */
506 ret = wl1271_acx_ps_rx_streaming(wl, enable); 499 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
507 if (ret < 0) 500 if (ret < 0)
508 goto out; 501 goto out;
509 502
510 if (enable) 503 if (enable)
511 set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags); 504 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
512 else 505 else
513 clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags); 506 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
514out: 507out:
515 return ret; 508 return ret;
516} 509}
@@ -519,25 +512,25 @@ out:
519 * this function is being called when the rx_streaming interval 512 * this function is being called when the rx_streaming interval
520 * has beed changed or rx_streaming should be disabled 513 * has beed changed or rx_streaming should be disabled
521 */ 514 */
522int wl1271_recalc_rx_streaming(struct wl1271 *wl) 515int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
523{ 516{
524 int ret = 0; 517 int ret = 0;
525 int period = wl->conf.rx_streaming.interval; 518 int period = wl->conf.rx_streaming.interval;
526 519
527 /* don't reconfigure if rx_streaming is disabled */ 520 /* don't reconfigure if rx_streaming is disabled */
528 if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) 521 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
529 goto out; 522 goto out;
530 523
531 /* reconfigure/disable according to new streaming_period */ 524 /* reconfigure/disable according to new streaming_period */
532 if (period && 525 if (period &&
533 test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) && 526 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
534 (wl->conf.rx_streaming.always || 527 (wl->conf.rx_streaming.always ||
535 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) 528 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
536 ret = wl1271_set_rx_streaming(wl, true); 529 ret = wl1271_set_rx_streaming(wl, wlvif, true);
537 else { 530 else {
538 ret = wl1271_set_rx_streaming(wl, false); 531 ret = wl1271_set_rx_streaming(wl, wlvif, false);
539 /* don't cancel_work_sync since we might deadlock */ 532 /* don't cancel_work_sync since we might deadlock */
540 del_timer_sync(&wl->rx_streaming_timer); 533 del_timer_sync(&wlvif->rx_streaming_timer);
541 } 534 }
542out: 535out:
543 return ret; 536 return ret;
@@ -546,13 +539,14 @@ out:
546static void wl1271_rx_streaming_enable_work(struct work_struct *work) 539static void wl1271_rx_streaming_enable_work(struct work_struct *work)
547{ 540{
548 int ret; 541 int ret;
549 struct wl1271 *wl = 542 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
550 container_of(work, struct wl1271, rx_streaming_enable_work); 543 rx_streaming_enable_work);
544 struct wl1271 *wl = wlvif->wl;
551 545
552 mutex_lock(&wl->mutex); 546 mutex_lock(&wl->mutex);
553 547
554 if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) || 548 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
555 !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) || 549 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
556 (!wl->conf.rx_streaming.always && 550 (!wl->conf.rx_streaming.always &&
557 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) 551 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
558 goto out; 552 goto out;
@@ -564,12 +558,12 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
564 if (ret < 0) 558 if (ret < 0)
565 goto out; 559 goto out;
566 560
567 ret = wl1271_set_rx_streaming(wl, true); 561 ret = wl1271_set_rx_streaming(wl, wlvif, true);
568 if (ret < 0) 562 if (ret < 0)
569 goto out_sleep; 563 goto out_sleep;
570 564
571 /* stop it after some time of inactivity */ 565 /* stop it after some time of inactivity */
572 mod_timer(&wl->rx_streaming_timer, 566 mod_timer(&wlvif->rx_streaming_timer,
573 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); 567 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
574 568
575out_sleep: 569out_sleep:
@@ -581,19 +575,20 @@ out:
581static void wl1271_rx_streaming_disable_work(struct work_struct *work) 575static void wl1271_rx_streaming_disable_work(struct work_struct *work)
582{ 576{
583 int ret; 577 int ret;
584 struct wl1271 *wl = 578 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
585 container_of(work, struct wl1271, rx_streaming_disable_work); 579 rx_streaming_disable_work);
580 struct wl1271 *wl = wlvif->wl;
586 581
587 mutex_lock(&wl->mutex); 582 mutex_lock(&wl->mutex);
588 583
589 if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) 584 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
590 goto out; 585 goto out;
591 586
592 ret = wl1271_ps_elp_wakeup(wl); 587 ret = wl1271_ps_elp_wakeup(wl);
593 if (ret < 0) 588 if (ret < 0)
594 goto out; 589 goto out;
595 590
596 ret = wl1271_set_rx_streaming(wl, false); 591 ret = wl1271_set_rx_streaming(wl, wlvif, false);
597 if (ret) 592 if (ret)
598 goto out_sleep; 593 goto out_sleep;
599 594
@@ -605,8 +600,9 @@ out:
605 600
606static void wl1271_rx_streaming_timer(unsigned long data) 601static void wl1271_rx_streaming_timer(unsigned long data)
607{ 602{
608 struct wl1271 *wl = (struct wl1271 *)data; 603 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
609 ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work); 604 struct wl1271 *wl = wlvif->wl;
605 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
610} 606}
611 607
612static void wl1271_conf_init(struct wl1271 *wl) 608static void wl1271_conf_init(struct wl1271 *wl)
@@ -645,9 +641,7 @@ static void wl1271_conf_init(struct wl1271 *wl)
645 641
646static int wl1271_plt_init(struct wl1271 *wl) 642static int wl1271_plt_init(struct wl1271 *wl)
647{ 643{
648 struct conf_tx_ac_category *conf_ac; 644 int ret;
649 struct conf_tx_tid *conf_tid;
650 int ret, i;
651 645
652 if (wl->chip.id == CHIP_ID_1283_PG20) 646 if (wl->chip.id == CHIP_ID_1283_PG20)
653 ret = wl128x_cmd_general_parms(wl); 647 ret = wl128x_cmd_general_parms(wl);
@@ -676,74 +670,14 @@ static int wl1271_plt_init(struct wl1271 *wl)
676 if (ret < 0) 670 if (ret < 0)
677 return ret; 671 return ret;
678 672
679 ret = wl1271_sta_init_templates_config(wl);
680 if (ret < 0)
681 return ret;
682
683 ret = wl1271_acx_init_mem_config(wl); 673 ret = wl1271_acx_init_mem_config(wl);
684 if (ret < 0) 674 if (ret < 0)
685 return ret; 675 return ret;
686 676
687 /* PHY layer config */
688 ret = wl1271_init_phy_config(wl);
689 if (ret < 0)
690 goto out_free_memmap;
691
692 ret = wl1271_acx_dco_itrim_params(wl);
693 if (ret < 0)
694 goto out_free_memmap;
695
696 /* Initialize connection monitoring thresholds */
697 ret = wl1271_acx_conn_monit_params(wl, false);
698 if (ret < 0)
699 goto out_free_memmap;
700
701 /* Bluetooth WLAN coexistence */
702 ret = wl1271_init_pta(wl);
703 if (ret < 0)
704 goto out_free_memmap;
705
706 /* FM WLAN coexistence */
707 ret = wl1271_acx_fm_coex(wl);
708 if (ret < 0)
709 goto out_free_memmap;
710
711 /* Energy detection */
712 ret = wl1271_init_energy_detection(wl);
713 if (ret < 0)
714 goto out_free_memmap;
715
716 ret = wl12xx_acx_mem_cfg(wl); 677 ret = wl12xx_acx_mem_cfg(wl);
717 if (ret < 0) 678 if (ret < 0)
718 goto out_free_memmap; 679 goto out_free_memmap;
719 680
720 /* Default fragmentation threshold */
721 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
722 if (ret < 0)
723 goto out_free_memmap;
724
725 /* Default TID/AC configuration */
726 BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
727 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
728 conf_ac = &wl->conf.tx.ac_conf[i];
729 ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
730 conf_ac->cw_max, conf_ac->aifsn,
731 conf_ac->tx_op_limit);
732 if (ret < 0)
733 goto out_free_memmap;
734
735 conf_tid = &wl->conf.tx.tid_conf[i];
736 ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
737 conf_tid->channel_type,
738 conf_tid->tsid,
739 conf_tid->ps_scheme,
740 conf_tid->ack_policy,
741 conf_tid->apsd_conf[0],
742 conf_tid->apsd_conf[1]);
743 if (ret < 0)
744 goto out_free_memmap;
745 }
746
747 /* Enable data path */ 681 /* Enable data path */
748 ret = wl1271_cmd_data_path(wl, 1); 682 ret = wl1271_cmd_data_path(wl, 1);
749 if (ret < 0) 683 if (ret < 0)
@@ -768,14 +702,12 @@ static int wl1271_plt_init(struct wl1271 *wl)
768 return ret; 702 return ret;
769} 703}
770 704
771static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts) 705static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
706 struct wl12xx_vif *wlvif,
707 u8 hlid, u8 tx_pkts)
772{ 708{
773 bool fw_ps, single_sta; 709 bool fw_ps, single_sta;
774 710
775 /* only regulate station links */
776 if (hlid < WL1271_AP_STA_HLID_START)
777 return;
778
779 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); 711 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
780 single_sta = (wl->active_sta_count == 1); 712 single_sta = (wl->active_sta_count == 1);
781 713
@@ -784,7 +716,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
784 * packets in FW or if the STA is awake. 716 * packets in FW or if the STA is awake.
785 */ 717 */
786 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS) 718 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
787 wl1271_ps_link_end(wl, hlid); 719 wl12xx_ps_link_end(wl, wlvif, hlid);
788 720
789 /* 721 /*
790 * Start high-level PS if the STA is asleep with enough blocks in FW. 722 * Start high-level PS if the STA is asleep with enough blocks in FW.
@@ -792,24 +724,14 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
792 * case FW-memory congestion is not a problem. 724 * case FW-memory congestion is not a problem.
793 */ 725 */
794 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) 726 else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
795 wl1271_ps_link_start(wl, hlid, true); 727 wl12xx_ps_link_start(wl, wlvif, hlid, true);
796}
797
798bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
799{
800 int id;
801
802 /* global/broadcast "stations" are always active */
803 if (hlid < WL1271_AP_STA_HLID_START)
804 return true;
805
806 id = hlid - WL1271_AP_STA_HLID_START;
807 return test_bit(id, wl->ap_hlid_map);
808} 728}
809 729
810static void wl12xx_irq_update_links_status(struct wl1271 *wl, 730static void wl12xx_irq_update_links_status(struct wl1271 *wl,
731 struct wl12xx_vif *wlvif,
811 struct wl12xx_fw_status *status) 732 struct wl12xx_fw_status *status)
812{ 733{
734 struct wl1271_link *lnk;
813 u32 cur_fw_ps_map; 735 u32 cur_fw_ps_map;
814 u8 hlid, cnt; 736 u8 hlid, cnt;
815 737
@@ -825,25 +747,22 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
825 wl->ap_fw_ps_map = cur_fw_ps_map; 747 wl->ap_fw_ps_map = cur_fw_ps_map;
826 } 748 }
827 749
828 for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) { 750 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
829 if (!wl1271_is_active_sta(wl, hlid)) 751 lnk = &wl->links[hlid];
830 continue; 752 cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts;
831
832 cnt = status->tx_lnk_free_pkts[hlid] -
833 wl->links[hlid].prev_freed_pkts;
834 753
835 wl->links[hlid].prev_freed_pkts = 754 lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid];
836 status->tx_lnk_free_pkts[hlid]; 755 lnk->allocated_pkts -= cnt;
837 wl->links[hlid].allocated_pkts -= cnt;
838 756
839 wl12xx_irq_ps_regulate_link(wl, hlid, 757 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
840 wl->links[hlid].allocated_pkts); 758 lnk->allocated_pkts);
841 } 759 }
842} 760}
843 761
844static void wl12xx_fw_status(struct wl1271 *wl, 762static void wl12xx_fw_status(struct wl1271 *wl,
845 struct wl12xx_fw_status *status) 763 struct wl12xx_fw_status *status)
846{ 764{
765 struct wl12xx_vif *wlvif;
847 struct timespec ts; 766 struct timespec ts;
848 u32 old_tx_blk_count = wl->tx_blocks_available; 767 u32 old_tx_blk_count = wl->tx_blocks_available;
849 int avail, freed_blocks; 768 int avail, freed_blocks;
@@ -898,8 +817,9 @@ static void wl12xx_fw_status(struct wl1271 *wl,
898 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 817 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
899 818
900 /* for AP update num of allocated TX blocks per link and ps status */ 819 /* for AP update num of allocated TX blocks per link and ps status */
901 if (wl->bss_type == BSS_TYPE_AP_BSS) 820 wl12xx_for_each_wlvif_ap(wl, wlvif) {
902 wl12xx_irq_update_links_status(wl, status); 821 wl12xx_irq_update_links_status(wl, wlvif, status);
822 }
903 823
904 /* update the host-chipset time offset */ 824 /* update the host-chipset time offset */
905 getnstimeofday(&ts); 825 getnstimeofday(&ts);
@@ -932,7 +852,7 @@ static void wl1271_netstack_work(struct work_struct *work)
932 852
933#define WL1271_IRQ_MAX_LOOPS 256 853#define WL1271_IRQ_MAX_LOOPS 256
934 854
935irqreturn_t wl1271_irq(int irq, void *cookie) 855static irqreturn_t wl1271_irq(int irq, void *cookie)
936{ 856{
937 int ret; 857 int ret;
938 u32 intr; 858 u32 intr;
@@ -1054,7 +974,6 @@ out:
1054 974
1055 return IRQ_HANDLED; 975 return IRQ_HANDLED;
1056} 976}
1057EXPORT_SYMBOL_GPL(wl1271_irq);
1058 977
1059static int wl1271_fetch_firmware(struct wl1271 *wl) 978static int wl1271_fetch_firmware(struct wl1271 *wl)
1060{ 979{
@@ -1069,10 +988,10 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
1069 988
1070 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name); 989 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
1071 990
1072 ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl)); 991 ret = request_firmware(&fw, fw_name, wl->dev);
1073 992
1074 if (ret < 0) { 993 if (ret < 0) {
1075 wl1271_error("could not get firmware: %d", ret); 994 wl1271_error("could not get firmware %s: %d", fw_name, ret);
1076 return ret; 995 return ret;
1077 } 996 }
1078 997
@@ -1107,10 +1026,11 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
1107 const struct firmware *fw; 1026 const struct firmware *fw;
1108 int ret; 1027 int ret;
1109 1028
1110 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl)); 1029 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
1111 1030
1112 if (ret < 0) { 1031 if (ret < 0) {
1113 wl1271_error("could not get nvs file: %d", ret); 1032 wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
1033 ret);
1114 return ret; 1034 return ret;
1115 } 1035 }
1116 1036
@@ -1217,11 +1137,13 @@ static void wl1271_recovery_work(struct work_struct *work)
1217{ 1137{
1218 struct wl1271 *wl = 1138 struct wl1271 *wl =
1219 container_of(work, struct wl1271, recovery_work); 1139 container_of(work, struct wl1271, recovery_work);
1140 struct wl12xx_vif *wlvif;
1141 struct ieee80211_vif *vif;
1220 1142
1221 mutex_lock(&wl->mutex); 1143 mutex_lock(&wl->mutex);
1222 1144
1223 if (wl->state != WL1271_STATE_ON) 1145 if (wl->state != WL1271_STATE_ON)
1224 goto out; 1146 goto out_unlock;
1225 1147
1226 /* Avoid a recursive recovery */ 1148 /* Avoid a recursive recovery */
1227 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); 1149 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1238,9 +1160,12 @@ static void wl1271_recovery_work(struct work_struct *work)
1238 * in the firmware during recovery. This doens't hurt if the network is 1160 * in the firmware during recovery. This doens't hurt if the network is
1239 * not encrypted. 1161 * not encrypted.
1240 */ 1162 */
1241 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) || 1163 wl12xx_for_each_wlvif(wl, wlvif) {
1242 test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) 1164 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
1243 wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING; 1165 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1166 wlvif->tx_security_seq +=
1167 WL1271_TX_SQN_POST_RECOVERY_PADDING;
1168 }
1244 1169
1245 /* Prevent spurious TX during FW restart */ 1170 /* Prevent spurious TX during FW restart */
1246 ieee80211_stop_queues(wl->hw); 1171 ieee80211_stop_queues(wl->hw);
@@ -1251,7 +1176,14 @@ static void wl1271_recovery_work(struct work_struct *work)
1251 } 1176 }
1252 1177
1253 /* reboot the chipset */ 1178 /* reboot the chipset */
1254 __wl1271_op_remove_interface(wl, false); 1179 while (!list_empty(&wl->wlvif_list)) {
1180 wlvif = list_first_entry(&wl->wlvif_list,
1181 struct wl12xx_vif, list);
1182 vif = wl12xx_wlvif_to_vif(wlvif);
1183 __wl1271_op_remove_interface(wl, vif, false);
1184 }
1185 mutex_unlock(&wl->mutex);
1186 wl1271_op_stop(wl->hw);
1255 1187
1256 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); 1188 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1257 1189
@@ -1262,8 +1194,8 @@ static void wl1271_recovery_work(struct work_struct *work)
1262 * to restart the HW. 1194 * to restart the HW.
1263 */ 1195 */
1264 ieee80211_wake_queues(wl->hw); 1196 ieee80211_wake_queues(wl->hw);
1265 1197 return;
1266out: 1198out_unlock:
1267 mutex_unlock(&wl->mutex); 1199 mutex_unlock(&wl->mutex);
1268} 1200}
1269 1201
@@ -1318,7 +1250,16 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
1318 /* 0. read chip id from CHIP_ID */ 1250 /* 0. read chip id from CHIP_ID */
1319 wl->chip.id = wl1271_read32(wl, CHIP_ID_B); 1251 wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
1320 1252
1321 /* 1. check if chip id is valid */ 1253 /*
1254 * For wl127x based devices we could use the default block
1255 * size (512 bytes), but due to a bug in the sdio driver, we
1256 * need to set it explicitly after the chip is powered on. To
1257 * simplify the code and since the performance impact is
1258 * negligible, we use the same block size for all different
1259 * chip types.
1260 */
1261 if (!wl1271_set_block_size(wl))
1262 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
1322 1263
1323 switch (wl->chip.id) { 1264 switch (wl->chip.id) {
1324 case CHIP_ID_1271_PG10: 1265 case CHIP_ID_1271_PG10:
@@ -1328,7 +1269,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
1328 ret = wl1271_setup(wl); 1269 ret = wl1271_setup(wl);
1329 if (ret < 0) 1270 if (ret < 0)
1330 goto out; 1271 goto out;
1272 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
1331 break; 1273 break;
1274
1332 case CHIP_ID_1271_PG20: 1275 case CHIP_ID_1271_PG20:
1333 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", 1276 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
1334 wl->chip.id); 1277 wl->chip.id);
@@ -1336,7 +1279,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
1336 ret = wl1271_setup(wl); 1279 ret = wl1271_setup(wl);
1337 if (ret < 0) 1280 if (ret < 0)
1338 goto out; 1281 goto out;
1282 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
1339 break; 1283 break;
1284
1340 case CHIP_ID_1283_PG20: 1285 case CHIP_ID_1283_PG20:
1341 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)", 1286 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
1342 wl->chip.id); 1287 wl->chip.id);
@@ -1344,9 +1289,6 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
1344 ret = wl1271_setup(wl); 1289 ret = wl1271_setup(wl);
1345 if (ret < 0) 1290 if (ret < 0)
1346 goto out; 1291 goto out;
1347
1348 if (wl1271_set_block_size(wl))
1349 wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
1350 break; 1292 break;
1351 case CHIP_ID_1283_PG10: 1293 case CHIP_ID_1283_PG10:
1352 default: 1294 default:
@@ -1389,8 +1331,6 @@ int wl1271_plt_start(struct wl1271 *wl)
1389 goto out; 1331 goto out;
1390 } 1332 }
1391 1333
1392 wl->bss_type = BSS_TYPE_STA_BSS;
1393
1394 while (retries) { 1334 while (retries) {
1395 retries--; 1335 retries--;
1396 ret = wl1271_chip_wakeup(wl); 1336 ret = wl1271_chip_wakeup(wl);
@@ -1482,33 +1422,34 @@ int wl1271_plt_stop(struct wl1271 *wl)
1482static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1422static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1483{ 1423{
1484 struct wl1271 *wl = hw->priv; 1424 struct wl1271 *wl = hw->priv;
1425 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1426 struct ieee80211_vif *vif = info->control.vif;
1427 struct wl12xx_vif *wlvif = NULL;
1485 unsigned long flags; 1428 unsigned long flags;
1486 int q, mapping; 1429 int q, mapping;
1487 u8 hlid = 0; 1430 u8 hlid;
1431
1432 if (vif)
1433 wlvif = wl12xx_vif_to_data(vif);
1488 1434
1489 mapping = skb_get_queue_mapping(skb); 1435 mapping = skb_get_queue_mapping(skb);
1490 q = wl1271_tx_get_queue(mapping); 1436 q = wl1271_tx_get_queue(mapping);
1491 1437
1492 if (wl->bss_type == BSS_TYPE_AP_BSS) 1438 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
1493 hlid = wl12xx_tx_get_hlid_ap(wl, skb);
1494 1439
1495 spin_lock_irqsave(&wl->wl_lock, flags); 1440 spin_lock_irqsave(&wl->wl_lock, flags);
1496 1441
1497 /* queue the packet */ 1442 /* queue the packet */
1498 if (wl->bss_type == BSS_TYPE_AP_BSS) { 1443 if (hlid == WL12XX_INVALID_LINK_ID ||
1499 if (!wl1271_is_active_sta(wl, hlid)) { 1444 (wlvif && !test_bit(hlid, wlvif->links_map))) {
1500 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", 1445 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1501 hlid, q); 1446 ieee80211_free_txskb(hw, skb);
1502 dev_kfree_skb(skb); 1447 goto out;
1503 goto out;
1504 }
1505
1506 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
1507 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1508 } else {
1509 skb_queue_tail(&wl->tx_queue[q], skb);
1510 } 1448 }
1511 1449
1450 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
1451 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1452
1512 wl->tx_queue_count[q]++; 1453 wl->tx_queue_count[q]++;
1513 1454
1514 /* 1455 /*
@@ -1609,13 +1550,14 @@ static struct notifier_block wl1271_dev_notifier = {
1609}; 1550};
1610 1551
1611#ifdef CONFIG_PM 1552#ifdef CONFIG_PM
1612static int wl1271_configure_suspend_sta(struct wl1271 *wl) 1553static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1554 struct wl12xx_vif *wlvif)
1613{ 1555{
1614 int ret = 0; 1556 int ret = 0;
1615 1557
1616 mutex_lock(&wl->mutex); 1558 mutex_lock(&wl->mutex);
1617 1559
1618 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 1560 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1619 goto out_unlock; 1561 goto out_unlock;
1620 1562
1621 ret = wl1271_ps_elp_wakeup(wl); 1563 ret = wl1271_ps_elp_wakeup(wl);
@@ -1623,12 +1565,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
1623 goto out_unlock; 1565 goto out_unlock;
1624 1566
1625 /* enter psm if needed*/ 1567 /* enter psm if needed*/
1626 if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) { 1568 if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
1627 DECLARE_COMPLETION_ONSTACK(compl); 1569 DECLARE_COMPLETION_ONSTACK(compl);
1628 1570
1629 wl->ps_compl = &compl; 1571 wlvif->ps_compl = &compl;
1630 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, 1572 ret = wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
1631 wl->basic_rate, true); 1573 wlvif->basic_rate, true);
1632 if (ret < 0) 1574 if (ret < 0)
1633 goto out_sleep; 1575 goto out_sleep;
1634 1576
@@ -1638,42 +1580,43 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
1638 1580
1639 ret = wait_for_completion_timeout( 1581 ret = wait_for_completion_timeout(
1640 &compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT)); 1582 &compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT));
1583
1584 mutex_lock(&wl->mutex);
1641 if (ret <= 0) { 1585 if (ret <= 0) {
1642 wl1271_warning("couldn't enter ps mode!"); 1586 wl1271_warning("couldn't enter ps mode!");
1643 ret = -EBUSY; 1587 ret = -EBUSY;
1644 goto out; 1588 goto out_cleanup;
1645 } 1589 }
1646 1590
1647 /* take mutex again, and wakeup */
1648 mutex_lock(&wl->mutex);
1649
1650 ret = wl1271_ps_elp_wakeup(wl); 1591 ret = wl1271_ps_elp_wakeup(wl);
1651 if (ret < 0) 1592 if (ret < 0)
1652 goto out_unlock; 1593 goto out_cleanup;
1653 } 1594 }
1654out_sleep: 1595out_sleep:
1655 wl1271_ps_elp_sleep(wl); 1596 wl1271_ps_elp_sleep(wl);
1597out_cleanup:
1598 wlvif->ps_compl = NULL;
1656out_unlock: 1599out_unlock:
1657 mutex_unlock(&wl->mutex); 1600 mutex_unlock(&wl->mutex);
1658out:
1659 return ret; 1601 return ret;
1660 1602
1661} 1603}
1662 1604
1663static int wl1271_configure_suspend_ap(struct wl1271 *wl) 1605static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1606 struct wl12xx_vif *wlvif)
1664{ 1607{
1665 int ret = 0; 1608 int ret = 0;
1666 1609
1667 mutex_lock(&wl->mutex); 1610 mutex_lock(&wl->mutex);
1668 1611
1669 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) 1612 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1670 goto out_unlock; 1613 goto out_unlock;
1671 1614
1672 ret = wl1271_ps_elp_wakeup(wl); 1615 ret = wl1271_ps_elp_wakeup(wl);
1673 if (ret < 0) 1616 if (ret < 0)
1674 goto out_unlock; 1617 goto out_unlock;
1675 1618
1676 ret = wl1271_acx_beacon_filter_opt(wl, true); 1619 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1677 1620
1678 wl1271_ps_elp_sleep(wl); 1621 wl1271_ps_elp_sleep(wl);
1679out_unlock: 1622out_unlock:
@@ -1682,20 +1625,22 @@ out_unlock:
1682 1625
1683} 1626}
1684 1627
1685static int wl1271_configure_suspend(struct wl1271 *wl) 1628static int wl1271_configure_suspend(struct wl1271 *wl,
1629 struct wl12xx_vif *wlvif)
1686{ 1630{
1687 if (wl->bss_type == BSS_TYPE_STA_BSS) 1631 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1688 return wl1271_configure_suspend_sta(wl); 1632 return wl1271_configure_suspend_sta(wl, wlvif);
1689 if (wl->bss_type == BSS_TYPE_AP_BSS) 1633 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1690 return wl1271_configure_suspend_ap(wl); 1634 return wl1271_configure_suspend_ap(wl, wlvif);
1691 return 0; 1635 return 0;
1692} 1636}
1693 1637
1694static void wl1271_configure_resume(struct wl1271 *wl) 1638static void wl1271_configure_resume(struct wl1271 *wl,
1639 struct wl12xx_vif *wlvif)
1695{ 1640{
1696 int ret; 1641 int ret;
1697 bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS; 1642 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1698 bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS; 1643 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1699 1644
1700 if (!is_sta && !is_ap) 1645 if (!is_sta && !is_ap)
1701 return; 1646 return;
@@ -1707,11 +1652,11 @@ static void wl1271_configure_resume(struct wl1271 *wl)
1707 1652
1708 if (is_sta) { 1653 if (is_sta) {
1709 /* exit psm if it wasn't configured */ 1654 /* exit psm if it wasn't configured */
1710 if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) 1655 if (!test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags))
1711 wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, 1656 wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
1712 wl->basic_rate, true); 1657 wlvif->basic_rate, true);
1713 } else if (is_ap) { 1658 } else if (is_ap) {
1714 wl1271_acx_beacon_filter_opt(wl, false); 1659 wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1715 } 1660 }
1716 1661
1717 wl1271_ps_elp_sleep(wl); 1662 wl1271_ps_elp_sleep(wl);
@@ -1723,16 +1668,19 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1723 struct cfg80211_wowlan *wow) 1668 struct cfg80211_wowlan *wow)
1724{ 1669{
1725 struct wl1271 *wl = hw->priv; 1670 struct wl1271 *wl = hw->priv;
1671 struct wl12xx_vif *wlvif;
1726 int ret; 1672 int ret;
1727 1673
1728 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); 1674 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1729 WARN_ON(!wow || !wow->any); 1675 WARN_ON(!wow || !wow->any);
1730 1676
1731 wl->wow_enabled = true; 1677 wl->wow_enabled = true;
1732 ret = wl1271_configure_suspend(wl); 1678 wl12xx_for_each_wlvif(wl, wlvif) {
1733 if (ret < 0) { 1679 ret = wl1271_configure_suspend(wl, wlvif);
1734 wl1271_warning("couldn't prepare device to suspend"); 1680 if (ret < 0) {
1735 return ret; 1681 wl1271_warning("couldn't prepare device to suspend");
1682 return ret;
1683 }
1736 } 1684 }
1737 /* flush any remaining work */ 1685 /* flush any remaining work */
1738 wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); 1686 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
@@ -1751,7 +1699,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1751 1699
1752 wl1271_enable_interrupts(wl); 1700 wl1271_enable_interrupts(wl);
1753 flush_work(&wl->tx_work); 1701 flush_work(&wl->tx_work);
1754 flush_delayed_work(&wl->pspoll_work); 1702 wl12xx_for_each_wlvif(wl, wlvif) {
1703 flush_delayed_work(&wlvif->pspoll_work);
1704 }
1755 flush_delayed_work(&wl->elp_work); 1705 flush_delayed_work(&wl->elp_work);
1756 1706
1757 return 0; 1707 return 0;
@@ -1760,6 +1710,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1760static int wl1271_op_resume(struct ieee80211_hw *hw) 1710static int wl1271_op_resume(struct ieee80211_hw *hw)
1761{ 1711{
1762 struct wl1271 *wl = hw->priv; 1712 struct wl1271 *wl = hw->priv;
1713 struct wl12xx_vif *wlvif;
1763 unsigned long flags; 1714 unsigned long flags;
1764 bool run_irq_work = false; 1715 bool run_irq_work = false;
1765 1716
@@ -1783,7 +1734,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
1783 wl1271_irq(0, wl); 1734 wl1271_irq(0, wl);
1784 wl1271_enable_interrupts(wl); 1735 wl1271_enable_interrupts(wl);
1785 } 1736 }
1786 wl1271_configure_resume(wl); 1737 wl12xx_for_each_wlvif(wl, wlvif) {
1738 wl1271_configure_resume(wl, wlvif);
1739 }
1787 wl->wow_enabled = false; 1740 wl->wow_enabled = false;
1788 1741
1789 return 0; 1742 return 0;
@@ -1810,20 +1763,119 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
1810 1763
1811static void wl1271_op_stop(struct ieee80211_hw *hw) 1764static void wl1271_op_stop(struct ieee80211_hw *hw)
1812{ 1765{
1766 struct wl1271 *wl = hw->priv;
1767 int i;
1768
1813 wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); 1769 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1770
1771 mutex_lock(&wl->mutex);
1772 if (wl->state == WL1271_STATE_OFF) {
1773 mutex_unlock(&wl->mutex);
1774 return;
1775 }
1776 /*
1777 * this must be before the cancel_work calls below, so that the work
1778 * functions don't perform further work.
1779 */
1780 wl->state = WL1271_STATE_OFF;
1781 mutex_unlock(&wl->mutex);
1782
1783 mutex_lock(&wl_list_mutex);
1784 list_del(&wl->list);
1785 mutex_unlock(&wl_list_mutex);
1786
1787 wl1271_disable_interrupts(wl);
1788 wl1271_flush_deferred_work(wl);
1789 cancel_delayed_work_sync(&wl->scan_complete_work);
1790 cancel_work_sync(&wl->netstack_work);
1791 cancel_work_sync(&wl->tx_work);
1792 cancel_delayed_work_sync(&wl->elp_work);
1793
1794 /* let's notify MAC80211 about the remaining pending TX frames */
1795 wl12xx_tx_reset(wl, true);
1796 mutex_lock(&wl->mutex);
1797
1798 wl1271_power_off(wl);
1799
1800 wl->band = IEEE80211_BAND_2GHZ;
1801
1802 wl->rx_counter = 0;
1803 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1804 wl->tx_blocks_available = 0;
1805 wl->tx_allocated_blocks = 0;
1806 wl->tx_results_count = 0;
1807 wl->tx_packets_count = 0;
1808 wl->time_offset = 0;
1809 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
1810 wl->ap_fw_ps_map = 0;
1811 wl->ap_ps_map = 0;
1812 wl->sched_scanning = false;
1813 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1814 memset(wl->links_map, 0, sizeof(wl->links_map));
1815 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1816 wl->active_sta_count = 0;
1817
1818 /* The system link is always allocated */
1819 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1820
1821 /*
1822 * this is performed after the cancel_work calls and the associated
1823 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1824 * get executed before all these vars have been reset.
1825 */
1826 wl->flags = 0;
1827
1828 wl->tx_blocks_freed = 0;
1829
1830 for (i = 0; i < NUM_TX_QUEUES; i++) {
1831 wl->tx_pkts_freed[i] = 0;
1832 wl->tx_allocated_pkts[i] = 0;
1833 }
1834
1835 wl1271_debugfs_reset(wl);
1836
1837 kfree(wl->fw_status);
1838 wl->fw_status = NULL;
1839 kfree(wl->tx_res_if);
1840 wl->tx_res_if = NULL;
1841 kfree(wl->target_mem_map);
1842 wl->target_mem_map = NULL;
1843
1844 mutex_unlock(&wl->mutex);
1845}
1846
1847static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
1848{
1849 u8 policy = find_first_zero_bit(wl->rate_policies_map,
1850 WL12XX_MAX_RATE_POLICIES);
1851 if (policy >= WL12XX_MAX_RATE_POLICIES)
1852 return -EBUSY;
1853
1854 __set_bit(policy, wl->rate_policies_map);
1855 *idx = policy;
1856 return 0;
1857}
1858
1859static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
1860{
1861 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
1862 return;
1863
1864 __clear_bit(*idx, wl->rate_policies_map);
1865 *idx = WL12XX_MAX_RATE_POLICIES;
1814} 1866}
1815 1867
1816static u8 wl12xx_get_role_type(struct wl1271 *wl) 1868static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1817{ 1869{
1818 switch (wl->bss_type) { 1870 switch (wlvif->bss_type) {
1819 case BSS_TYPE_AP_BSS: 1871 case BSS_TYPE_AP_BSS:
1820 if (wl->p2p) 1872 if (wlvif->p2p)
1821 return WL1271_ROLE_P2P_GO; 1873 return WL1271_ROLE_P2P_GO;
1822 else 1874 else
1823 return WL1271_ROLE_AP; 1875 return WL1271_ROLE_AP;
1824 1876
1825 case BSS_TYPE_STA_BSS: 1877 case BSS_TYPE_STA_BSS:
1826 if (wl->p2p) 1878 if (wlvif->p2p)
1827 return WL1271_ROLE_P2P_CL; 1879 return WL1271_ROLE_P2P_CL;
1828 else 1880 else
1829 return WL1271_ROLE_STA; 1881 return WL1271_ROLE_STA;
@@ -1832,78 +1884,95 @@ static u8 wl12xx_get_role_type(struct wl1271 *wl)
1832 return WL1271_ROLE_IBSS; 1884 return WL1271_ROLE_IBSS;
1833 1885
1834 default: 1886 default:
1835 wl1271_error("invalid bss_type: %d", wl->bss_type); 1887 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
1836 } 1888 }
1837 return WL12XX_INVALID_ROLE_TYPE; 1889 return WL12XX_INVALID_ROLE_TYPE;
1838} 1890}
1839 1891
1840static int wl1271_op_add_interface(struct ieee80211_hw *hw, 1892static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
1841 struct ieee80211_vif *vif)
1842{ 1893{
1843 struct wl1271 *wl = hw->priv; 1894 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
1844 struct wiphy *wiphy = hw->wiphy; 1895 int i;
1845 int retries = WL1271_BOOT_RETRIES;
1846 int ret = 0;
1847 u8 role_type;
1848 bool booted = false;
1849
1850 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
1851 ieee80211_vif_type_p2p(vif), vif->addr);
1852
1853 mutex_lock(&wl->mutex);
1854 if (wl->vif) {
1855 wl1271_debug(DEBUG_MAC80211,
1856 "multiple vifs are not supported yet");
1857 ret = -EBUSY;
1858 goto out;
1859 }
1860 1896
1861 /* 1897 /* clear everything but the persistent data */
1862 * in some very corner case HW recovery scenarios its possible to 1898 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
1863 * get here before __wl1271_op_remove_interface is complete, so
1864 * opt out if that is the case.
1865 */
1866 if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
1867 ret = -EBUSY;
1868 goto out;
1869 }
1870 1899
1871 switch (ieee80211_vif_type_p2p(vif)) { 1900 switch (ieee80211_vif_type_p2p(vif)) {
1872 case NL80211_IFTYPE_P2P_CLIENT: 1901 case NL80211_IFTYPE_P2P_CLIENT:
1873 wl->p2p = 1; 1902 wlvif->p2p = 1;
1874 /* fall-through */ 1903 /* fall-through */
1875 case NL80211_IFTYPE_STATION: 1904 case NL80211_IFTYPE_STATION:
1876 wl->bss_type = BSS_TYPE_STA_BSS; 1905 wlvif->bss_type = BSS_TYPE_STA_BSS;
1877 wl->set_bss_type = BSS_TYPE_STA_BSS;
1878 break; 1906 break;
1879 case NL80211_IFTYPE_ADHOC: 1907 case NL80211_IFTYPE_ADHOC:
1880 wl->bss_type = BSS_TYPE_IBSS; 1908 wlvif->bss_type = BSS_TYPE_IBSS;
1881 wl->set_bss_type = BSS_TYPE_STA_BSS;
1882 break; 1909 break;
1883 case NL80211_IFTYPE_P2P_GO: 1910 case NL80211_IFTYPE_P2P_GO:
1884 wl->p2p = 1; 1911 wlvif->p2p = 1;
1885 /* fall-through */ 1912 /* fall-through */
1886 case NL80211_IFTYPE_AP: 1913 case NL80211_IFTYPE_AP:
1887 wl->bss_type = BSS_TYPE_AP_BSS; 1914 wlvif->bss_type = BSS_TYPE_AP_BSS;
1888 break; 1915 break;
1889 default: 1916 default:
1890 ret = -EOPNOTSUPP; 1917 wlvif->bss_type = MAX_BSS_TYPE;
1891 goto out; 1918 return -EOPNOTSUPP;
1892 } 1919 }
1893 1920
1894 role_type = wl12xx_get_role_type(wl); 1921 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
1895 if (role_type == WL12XX_INVALID_ROLE_TYPE) { 1922 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
1896 ret = -EINVAL; 1923 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
1897 goto out; 1924
1925 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
1926 wlvif->bss_type == BSS_TYPE_IBSS) {
1927 /* init sta/ibss data */
1928 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
1929 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
1930 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
1931 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
1932 } else {
1933 /* init ap data */
1934 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
1935 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
1936 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
1937 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
1938 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
1939 wl12xx_allocate_rate_policy(wl,
1940 &wlvif->ap.ucast_rate_idx[i]);
1898 } 1941 }
1899 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
1900 1942
1901 if (wl->state != WL1271_STATE_OFF) { 1943 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
1902 wl1271_error("cannot start because not in off state: %d", 1944 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
1903 wl->state); 1945 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
1904 ret = -EBUSY; 1946 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
1905 goto out; 1947 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
1906 } 1948 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
1949
1950 /*
1951 * mac80211 configures some values globally, while we treat them
1952 * per-interface. thus, on init, we have to copy them from wl
1953 */
1954 wlvif->band = wl->band;
1955 wlvif->channel = wl->channel;
1956 wlvif->power_level = wl->power_level;
1957
1958 INIT_WORK(&wlvif->rx_streaming_enable_work,
1959 wl1271_rx_streaming_enable_work);
1960 INIT_WORK(&wlvif->rx_streaming_disable_work,
1961 wl1271_rx_streaming_disable_work);
1962 INIT_DELAYED_WORK(&wlvif->pspoll_work, wl1271_pspoll_work);
1963 INIT_LIST_HEAD(&wlvif->list);
1964
1965 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
1966 (unsigned long) wlvif);
1967 return 0;
1968}
1969
1970static bool wl12xx_init_fw(struct wl1271 *wl)
1971{
1972 int retries = WL1271_BOOT_RETRIES;
1973 bool booted = false;
1974 struct wiphy *wiphy = wl->hw->wiphy;
1975 int ret;
1907 1976
1908 while (retries) { 1977 while (retries) {
1909 retries--; 1978 retries--;
@@ -1915,25 +1984,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1915 if (ret < 0) 1984 if (ret < 0)
1916 goto power_off; 1985 goto power_off;
1917 1986
1918 if (wl->bss_type == BSS_TYPE_STA_BSS ||
1919 wl->bss_type == BSS_TYPE_IBSS) {
1920 /*
1921 * The device role is a special role used for
1922 * rx and tx frames prior to association (as
1923 * the STA role can get packets only from
1924 * its associated bssid)
1925 */
1926 ret = wl12xx_cmd_role_enable(wl,
1927 WL1271_ROLE_DEVICE,
1928 &wl->dev_role_id);
1929 if (ret < 0)
1930 goto irq_disable;
1931 }
1932
1933 ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id);
1934 if (ret < 0)
1935 goto irq_disable;
1936
1937 ret = wl1271_hw_init(wl); 1987 ret = wl1271_hw_init(wl);
1938 if (ret < 0) 1988 if (ret < 0)
1939 goto irq_disable; 1989 goto irq_disable;
@@ -1964,9 +2014,6 @@ power_off:
1964 goto out; 2014 goto out;
1965 } 2015 }
1966 2016
1967 wl->vif = vif;
1968 wl->state = WL1271_STATE_ON;
1969 set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
1970 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str); 2017 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
1971 2018
1972 /* update hw/fw version info in wiphy struct */ 2019 /* update hw/fw version info in wiphy struct */
@@ -1984,7 +2031,110 @@ power_off:
1984 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported", 2031 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
1985 wl->enable_11a ? "" : "not "); 2032 wl->enable_11a ? "" : "not ");
1986 2033
2034 wl->state = WL1271_STATE_ON;
1987out: 2035out:
2036 return booted;
2037}
2038
2039static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2040 struct ieee80211_vif *vif)
2041{
2042 struct wl1271 *wl = hw->priv;
2043 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2044 int ret = 0;
2045 u8 role_type;
2046 bool booted = false;
2047
2048 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2049 ieee80211_vif_type_p2p(vif), vif->addr);
2050
2051 mutex_lock(&wl->mutex);
2052 ret = wl1271_ps_elp_wakeup(wl);
2053 if (ret < 0)
2054 goto out_unlock;
2055
2056 if (wl->vif) {
2057 wl1271_debug(DEBUG_MAC80211,
2058 "multiple vifs are not supported yet");
2059 ret = -EBUSY;
2060 goto out;
2061 }
2062
2063 /*
2064 * in some very corner case HW recovery scenarios its possible to
2065 * get here before __wl1271_op_remove_interface is complete, so
2066 * opt out if that is the case.
2067 */
2068 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2069 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2070 ret = -EBUSY;
2071 goto out;
2072 }
2073
2074 ret = wl12xx_init_vif_data(wl, vif);
2075 if (ret < 0)
2076 goto out;
2077
2078 wlvif->wl = wl;
2079 role_type = wl12xx_get_role_type(wl, wlvif);
2080 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2081 ret = -EINVAL;
2082 goto out;
2083 }
2084
2085 /*
2086 * TODO: after the nvs issue will be solved, move this block
2087 * to start(), and make sure here the driver is ON.
2088 */
2089 if (wl->state == WL1271_STATE_OFF) {
2090 /*
2091 * we still need this in order to configure the fw
2092 * while uploading the nvs
2093 */
2094 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
2095
2096 booted = wl12xx_init_fw(wl);
2097 if (!booted) {
2098 ret = -EINVAL;
2099 goto out;
2100 }
2101 }
2102
2103 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2104 wlvif->bss_type == BSS_TYPE_IBSS) {
2105 /*
2106 * The device role is a special role used for
2107 * rx and tx frames prior to association (as
2108 * the STA role can get packets only from
2109 * its associated bssid)
2110 */
2111 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2112 WL1271_ROLE_DEVICE,
2113 &wlvif->dev_role_id);
2114 if (ret < 0)
2115 goto out;
2116 }
2117
2118 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2119 role_type, &wlvif->role_id);
2120 if (ret < 0)
2121 goto out;
2122
2123 ret = wl1271_init_vif_specific(wl, vif);
2124 if (ret < 0)
2125 goto out;
2126
2127 wl->vif = vif;
2128 list_add(&wlvif->list, &wl->wlvif_list);
2129 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2130
2131 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2132 wl->ap_count++;
2133 else
2134 wl->sta_count++;
2135out:
2136 wl1271_ps_elp_sleep(wl);
2137out_unlock:
1988 mutex_unlock(&wl->mutex); 2138 mutex_unlock(&wl->mutex);
1989 2139
1990 mutex_lock(&wl_list_mutex); 2140 mutex_lock(&wl_list_mutex);
@@ -1996,29 +2146,34 @@ out:
1996} 2146}
1997 2147
1998static void __wl1271_op_remove_interface(struct wl1271 *wl, 2148static void __wl1271_op_remove_interface(struct wl1271 *wl,
2149 struct ieee80211_vif *vif,
1999 bool reset_tx_queues) 2150 bool reset_tx_queues)
2000{ 2151{
2001 int ret, i; 2152 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2153 int i, ret;
2002 2154
2003 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 2155 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2004 2156
2157 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2158 return;
2159
2160 wl->vif = NULL;
2161
2005 /* because of hardware recovery, we may get here twice */ 2162 /* because of hardware recovery, we may get here twice */
2006 if (wl->state != WL1271_STATE_ON) 2163 if (wl->state != WL1271_STATE_ON)
2007 return; 2164 return;
2008 2165
2009 wl1271_info("down"); 2166 wl1271_info("down");
2010 2167
2011 mutex_lock(&wl_list_mutex);
2012 list_del(&wl->list);
2013 mutex_unlock(&wl_list_mutex);
2014
2015 /* enable dyn ps just in case (if left on due to fw crash etc) */ 2168 /* enable dyn ps just in case (if left on due to fw crash etc) */
2016 if (wl->bss_type == BSS_TYPE_STA_BSS) 2169 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
2017 ieee80211_enable_dyn_ps(wl->vif); 2170 ieee80211_enable_dyn_ps(vif);
2018 2171
2019 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { 2172 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2173 wl->scan_vif == vif) {
2020 wl->scan.state = WL1271_SCAN_STATE_IDLE; 2174 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2021 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); 2175 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2176 wl->scan_vif = NULL;
2022 wl->scan.req = NULL; 2177 wl->scan.req = NULL;
2023 ieee80211_scan_completed(wl->hw, true); 2178 ieee80211_scan_completed(wl->hw, true);
2024 } 2179 }
@@ -2029,13 +2184,13 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2029 if (ret < 0) 2184 if (ret < 0)
2030 goto deinit; 2185 goto deinit;
2031 2186
2032 if (wl->bss_type == BSS_TYPE_STA_BSS) { 2187 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
2033 ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id); 2188 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2034 if (ret < 0) 2189 if (ret < 0)
2035 goto deinit; 2190 goto deinit;
2036 } 2191 }
2037 2192
2038 ret = wl12xx_cmd_role_disable(wl, &wl->role_id); 2193 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2039 if (ret < 0) 2194 if (ret < 0)
2040 goto deinit; 2195 goto deinit;
2041 2196
@@ -2043,120 +2198,82 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2043 } 2198 }
2044deinit: 2199deinit:
2045 /* clear all hlids (except system_hlid) */ 2200 /* clear all hlids (except system_hlid) */
2046 wl->sta_hlid = WL12XX_INVALID_LINK_ID; 2201 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2047 wl->dev_hlid = WL12XX_INVALID_LINK_ID; 2202
2048 wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID; 2203 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2049 wl->ap_global_hlid = WL12XX_INVALID_LINK_ID; 2204 wlvif->bss_type == BSS_TYPE_IBSS) {
2205 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2206 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2207 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2208 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2209 } else {
2210 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2211 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2212 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2213 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2214 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2215 wl12xx_free_rate_policy(wl,
2216 &wlvif->ap.ucast_rate_idx[i]);
2217 }
2050 2218
2051 /* 2219 wl12xx_tx_reset_wlvif(wl, wlvif);
2052 * this must be before the cancel_work calls below, so that the work 2220 wl1271_free_ap_keys(wl, wlvif);
2053 * functions don't perform further work. 2221 if (wl->last_wlvif == wlvif)
2054 */ 2222 wl->last_wlvif = NULL;
2055 wl->state = WL1271_STATE_OFF; 2223 list_del(&wlvif->list);
2224 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2225 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2226 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2227
2228 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2229 wl->ap_count--;
2230 else
2231 wl->sta_count--;
2056 2232
2057 mutex_unlock(&wl->mutex); 2233 mutex_unlock(&wl->mutex);
2058 2234 del_timer_sync(&wlvif->rx_streaming_timer);
2059 wl1271_disable_interrupts(wl); 2235 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2060 wl1271_flush_deferred_work(wl); 2236 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2061 cancel_delayed_work_sync(&wl->scan_complete_work); 2237 cancel_delayed_work_sync(&wlvif->pspoll_work);
2062 cancel_work_sync(&wl->netstack_work);
2063 cancel_work_sync(&wl->tx_work);
2064 del_timer_sync(&wl->rx_streaming_timer);
2065 cancel_work_sync(&wl->rx_streaming_enable_work);
2066 cancel_work_sync(&wl->rx_streaming_disable_work);
2067 cancel_delayed_work_sync(&wl->pspoll_work);
2068 cancel_delayed_work_sync(&wl->elp_work);
2069 2238
2070 mutex_lock(&wl->mutex); 2239 mutex_lock(&wl->mutex);
2071
2072 /* let's notify MAC80211 about the remaining pending TX frames */
2073 wl1271_tx_reset(wl, reset_tx_queues);
2074 wl1271_power_off(wl);
2075
2076 memset(wl->bssid, 0, ETH_ALEN);
2077 memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
2078 wl->ssid_len = 0;
2079 wl->bss_type = MAX_BSS_TYPE;
2080 wl->set_bss_type = MAX_BSS_TYPE;
2081 wl->p2p = 0;
2082 wl->band = IEEE80211_BAND_2GHZ;
2083
2084 wl->rx_counter = 0;
2085 wl->psm_entry_retry = 0;
2086 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2087 wl->tx_blocks_available = 0;
2088 wl->tx_allocated_blocks = 0;
2089 wl->tx_results_count = 0;
2090 wl->tx_packets_count = 0;
2091 wl->time_offset = 0;
2092 wl->session_counter = 0;
2093 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
2094 wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2095 wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2096 wl->vif = NULL;
2097 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
2098 wl1271_free_ap_keys(wl);
2099 memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
2100 wl->ap_fw_ps_map = 0;
2101 wl->ap_ps_map = 0;
2102 wl->sched_scanning = false;
2103 wl->role_id = WL12XX_INVALID_ROLE_ID;
2104 wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
2105 memset(wl->roles_map, 0, sizeof(wl->roles_map));
2106 memset(wl->links_map, 0, sizeof(wl->links_map));
2107 memset(wl->roc_map, 0, sizeof(wl->roc_map));
2108 wl->active_sta_count = 0;
2109
2110 /* The system link is always allocated */
2111 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2112
2113 /*
2114 * this is performed after the cancel_work calls and the associated
2115 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2116 * get executed before all these vars have been reset.
2117 */
2118 wl->flags = 0;
2119
2120 wl->tx_blocks_freed = 0;
2121
2122 for (i = 0; i < NUM_TX_QUEUES; i++) {
2123 wl->tx_pkts_freed[i] = 0;
2124 wl->tx_allocated_pkts[i] = 0;
2125 }
2126
2127 wl1271_debugfs_reset(wl);
2128
2129 kfree(wl->fw_status);
2130 wl->fw_status = NULL;
2131 kfree(wl->tx_res_if);
2132 wl->tx_res_if = NULL;
2133 kfree(wl->target_mem_map);
2134 wl->target_mem_map = NULL;
2135} 2240}
2136 2241
2137static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 2242static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2138 struct ieee80211_vif *vif) 2243 struct ieee80211_vif *vif)
2139{ 2244{
2140 struct wl1271 *wl = hw->priv; 2245 struct wl1271 *wl = hw->priv;
2246 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2247 struct wl12xx_vif *iter;
2141 2248
2142 mutex_lock(&wl->mutex); 2249 mutex_lock(&wl->mutex);
2250
2251 if (wl->state == WL1271_STATE_OFF ||
2252 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2253 goto out;
2254
2143 /* 2255 /*
2144 * wl->vif can be null here if someone shuts down the interface 2256 * wl->vif can be null here if someone shuts down the interface
2145 * just when hardware recovery has been started. 2257 * just when hardware recovery has been started.
2146 */ 2258 */
2147 if (wl->vif) { 2259 wl12xx_for_each_wlvif(wl, iter) {
2148 WARN_ON(wl->vif != vif); 2260 if (iter != wlvif)
2149 __wl1271_op_remove_interface(wl, true); 2261 continue;
2150 }
2151 2262
2263 __wl1271_op_remove_interface(wl, vif, true);
2264 break;
2265 }
2266 WARN_ON(iter != wlvif);
2267out:
2152 mutex_unlock(&wl->mutex); 2268 mutex_unlock(&wl->mutex);
2153 cancel_work_sync(&wl->recovery_work); 2269 cancel_work_sync(&wl->recovery_work);
2154} 2270}
2155 2271
2156static int wl1271_join(struct wl1271 *wl, bool set_assoc) 2272static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2273 bool set_assoc)
2157{ 2274{
2158 int ret; 2275 int ret;
2159 bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS); 2276 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2160 2277
2161 /* 2278 /*
2162 * One of the side effects of the JOIN command is that is clears 2279 * One of the side effects of the JOIN command is that is clears
@@ -2167,20 +2284,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
2167 * Keep the below message for now, unless it starts bothering 2284 * Keep the below message for now, unless it starts bothering
2168 * users who really like to roam a lot :) 2285 * users who really like to roam a lot :)
2169 */ 2286 */
2170 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 2287 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2171 wl1271_info("JOIN while associated."); 2288 wl1271_info("JOIN while associated.");
2172 2289
2173 if (set_assoc) 2290 if (set_assoc)
2174 set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); 2291 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2175 2292
2176 if (is_ibss) 2293 if (is_ibss)
2177 ret = wl12xx_cmd_role_start_ibss(wl); 2294 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2178 else 2295 else
2179 ret = wl12xx_cmd_role_start_sta(wl); 2296 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2180 if (ret < 0) 2297 if (ret < 0)
2181 goto out; 2298 goto out;
2182 2299
2183 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 2300 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2184 goto out; 2301 goto out;
2185 2302
2186 /* 2303 /*
@@ -2189,19 +2306,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
2189 * the join. The acx_aid starts the keep-alive process, and the order 2306 * the join. The acx_aid starts the keep-alive process, and the order
2190 * of the commands below is relevant. 2307 * of the commands below is relevant.
2191 */ 2308 */
2192 ret = wl1271_acx_keep_alive_mode(wl, true); 2309 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2193 if (ret < 0) 2310 if (ret < 0)
2194 goto out; 2311 goto out;
2195 2312
2196 ret = wl1271_acx_aid(wl, wl->aid); 2313 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2197 if (ret < 0) 2314 if (ret < 0)
2198 goto out; 2315 goto out;
2199 2316
2200 ret = wl1271_cmd_build_klv_null_data(wl); 2317 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2201 if (ret < 0) 2318 if (ret < 0)
2202 goto out; 2319 goto out;
2203 2320
2204 ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA, 2321 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2322 CMD_TEMPL_KLV_IDX_NULL_DATA,
2205 ACX_KEEP_ALIVE_TPL_VALID); 2323 ACX_KEEP_ALIVE_TPL_VALID);
2206 if (ret < 0) 2324 if (ret < 0)
2207 goto out; 2325 goto out;
@@ -2210,34 +2328,34 @@ out:
2210 return ret; 2328 return ret;
2211} 2329}
2212 2330
2213static int wl1271_unjoin(struct wl1271 *wl) 2331static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2214{ 2332{
2215 int ret; 2333 int ret;
2216 2334
2217 if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) { 2335 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2336 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2337
2218 wl12xx_cmd_stop_channel_switch(wl); 2338 wl12xx_cmd_stop_channel_switch(wl);
2219 ieee80211_chswitch_done(wl->vif, false); 2339 ieee80211_chswitch_done(vif, false);
2220 } 2340 }
2221 2341
2222 /* to stop listening to a channel, we disconnect */ 2342 /* to stop listening to a channel, we disconnect */
2223 ret = wl12xx_cmd_role_stop_sta(wl); 2343 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
2224 if (ret < 0) 2344 if (ret < 0)
2225 goto out; 2345 goto out;
2226 2346
2227 memset(wl->bssid, 0, ETH_ALEN);
2228
2229 /* reset TX security counters on a clean disconnect */ 2347 /* reset TX security counters on a clean disconnect */
2230 wl->tx_security_last_seq_lsb = 0; 2348 wlvif->tx_security_last_seq_lsb = 0;
2231 wl->tx_security_seq = 0; 2349 wlvif->tx_security_seq = 0;
2232 2350
2233out: 2351out:
2234 return ret; 2352 return ret;
2235} 2353}
2236 2354
2237static void wl1271_set_band_rate(struct wl1271 *wl) 2355static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2238{ 2356{
2239 wl->basic_rate_set = wl->bitrate_masks[wl->band]; 2357 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2240 wl->rate_set = wl->basic_rate_set; 2358 wlvif->rate_set = wlvif->basic_rate_set;
2241} 2359}
2242 2360
2243static bool wl12xx_is_roc(struct wl1271 *wl) 2361static bool wl12xx_is_roc(struct wl1271 *wl)
@@ -2251,27 +2369,25 @@ static bool wl12xx_is_roc(struct wl1271 *wl)
2251 return true; 2369 return true;
2252} 2370}
2253 2371
2254static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle) 2372static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2373 bool idle)
2255{ 2374{
2256 int ret; 2375 int ret;
2257 2376
2258 if (idle) { 2377 if (idle) {
2259 /* no need to croc if we weren't busy (e.g. during boot) */ 2378 /* no need to croc if we weren't busy (e.g. during boot) */
2260 if (wl12xx_is_roc(wl)) { 2379 if (wl12xx_is_roc(wl)) {
2261 ret = wl12xx_croc(wl, wl->dev_role_id); 2380 ret = wl12xx_stop_dev(wl, wlvif);
2262 if (ret < 0)
2263 goto out;
2264
2265 ret = wl12xx_cmd_role_stop_dev(wl);
2266 if (ret < 0) 2381 if (ret < 0)
2267 goto out; 2382 goto out;
2268 } 2383 }
2269 wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); 2384 wlvif->rate_set =
2270 ret = wl1271_acx_sta_rate_policies(wl); 2385 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2386 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2271 if (ret < 0) 2387 if (ret < 0)
2272 goto out; 2388 goto out;
2273 ret = wl1271_acx_keep_alive_config( 2389 ret = wl1271_acx_keep_alive_config(
2274 wl, CMD_TEMPL_KLV_IDX_NULL_DATA, 2390 wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
2275 ACX_KEEP_ALIVE_TPL_INVALID); 2391 ACX_KEEP_ALIVE_TPL_INVALID);
2276 if (ret < 0) 2392 if (ret < 0)
2277 goto out; 2393 goto out;
@@ -2283,11 +2399,7 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
2283 ieee80211_sched_scan_stopped(wl->hw); 2399 ieee80211_sched_scan_stopped(wl->hw);
2284 } 2400 }
2285 2401
2286 ret = wl12xx_cmd_role_start_dev(wl); 2402 ret = wl12xx_start_dev(wl, wlvif);
2287 if (ret < 0)
2288 goto out;
2289
2290 ret = wl12xx_roc(wl, wl->dev_role_id);
2291 if (ret < 0) 2403 if (ret < 0)
2292 goto out; 2404 goto out;
2293 clear_bit(WL1271_FLAG_IDLE, &wl->flags); 2405 clear_bit(WL1271_FLAG_IDLE, &wl->flags);
@@ -2297,61 +2409,22 @@ out:
2297 return ret; 2409 return ret;
2298} 2410}
2299 2411
2300static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) 2412static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2413 struct ieee80211_conf *conf, u32 changed)
2301{ 2414{
2302 struct wl1271 *wl = hw->priv; 2415 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2303 struct ieee80211_conf *conf = &hw->conf; 2416 int channel, ret;
2304 int channel, ret = 0;
2305 bool is_ap;
2306 2417
2307 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 2418 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2308 2419
2309 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
2310 " changed 0x%x",
2311 channel,
2312 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2313 conf->power_level,
2314 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2315 changed);
2316
2317 /*
2318 * mac80211 will go to idle nearly immediately after transmitting some
2319 * frames, such as the deauth. To make sure those frames reach the air,
2320 * wait here until the TX queue is fully flushed.
2321 */
2322 if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2323 (conf->flags & IEEE80211_CONF_IDLE))
2324 wl1271_tx_flush(wl);
2325
2326 mutex_lock(&wl->mutex);
2327
2328 if (unlikely(wl->state == WL1271_STATE_OFF)) {
2329 /* we support configuring the channel and band while off */
2330 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2331 wl->band = conf->channel->band;
2332 wl->channel = channel;
2333 }
2334
2335 if ((changed & IEEE80211_CONF_CHANGE_POWER))
2336 wl->power_level = conf->power_level;
2337
2338 goto out;
2339 }
2340
2341 is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
2342
2343 ret = wl1271_ps_elp_wakeup(wl);
2344 if (ret < 0)
2345 goto out;
2346
2347 /* if the channel changes while joined, join again */ 2420 /* if the channel changes while joined, join again */
2348 if (changed & IEEE80211_CONF_CHANGE_CHANNEL && 2421 if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
2349 ((wl->band != conf->channel->band) || 2422 ((wlvif->band != conf->channel->band) ||
2350 (wl->channel != channel))) { 2423 (wlvif->channel != channel))) {
2351 /* send all pending packets */ 2424 /* send all pending packets */
2352 wl1271_tx_work_locked(wl); 2425 wl1271_tx_work_locked(wl);
2353 wl->band = conf->channel->band; 2426 wlvif->band = conf->channel->band;
2354 wl->channel = channel; 2427 wlvif->channel = channel;
2355 2428
2356 if (!is_ap) { 2429 if (!is_ap) {
2357 /* 2430 /*
@@ -2360,24 +2433,27 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2360 * possible rate for the band as a fixed rate for 2433 * possible rate for the band as a fixed rate for
2361 * association frames and other control messages. 2434 * association frames and other control messages.
2362 */ 2435 */
2363 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 2436 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2364 wl1271_set_band_rate(wl); 2437 wl1271_set_band_rate(wl, wlvif);
2365 2438
2366 wl->basic_rate = 2439 wlvif->basic_rate =
2367 wl1271_tx_min_rate_get(wl, wl->basic_rate_set); 2440 wl1271_tx_min_rate_get(wl,
2368 ret = wl1271_acx_sta_rate_policies(wl); 2441 wlvif->basic_rate_set);
2442 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2369 if (ret < 0) 2443 if (ret < 0)
2370 wl1271_warning("rate policy for channel " 2444 wl1271_warning("rate policy for channel "
2371 "failed %d", ret); 2445 "failed %d", ret);
2372 2446
2373 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { 2447 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED,
2448 &wlvif->flags)) {
2374 if (wl12xx_is_roc(wl)) { 2449 if (wl12xx_is_roc(wl)) {
2375 /* roaming */ 2450 /* roaming */
2376 ret = wl12xx_croc(wl, wl->dev_role_id); 2451 ret = wl12xx_croc(wl,
2452 wlvif->dev_role_id);
2377 if (ret < 0) 2453 if (ret < 0)
2378 goto out_sleep; 2454 return ret;
2379 } 2455 }
2380 ret = wl1271_join(wl, false); 2456 ret = wl1271_join(wl, wlvif, false);
2381 if (ret < 0) 2457 if (ret < 0)
2382 wl1271_warning("cmd join on channel " 2458 wl1271_warning("cmd join on channel "
2383 "failed %d", ret); 2459 "failed %d", ret);
@@ -2389,64 +2465,112 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2389 */ 2465 */
2390 if (wl12xx_is_roc(wl) && 2466 if (wl12xx_is_roc(wl) &&
2391 !(conf->flags & IEEE80211_CONF_IDLE)) { 2467 !(conf->flags & IEEE80211_CONF_IDLE)) {
2392 ret = wl12xx_croc(wl, wl->dev_role_id); 2468 ret = wl12xx_stop_dev(wl, wlvif);
2393 if (ret < 0) 2469 if (ret < 0)
2394 goto out_sleep; 2470 return ret;
2395 2471
2396 ret = wl12xx_roc(wl, wl->dev_role_id); 2472 ret = wl12xx_start_dev(wl, wlvif);
2397 if (ret < 0) 2473 if (ret < 0)
2398 wl1271_warning("roc failed %d", 2474 return ret;
2399 ret);
2400 } 2475 }
2401 } 2476 }
2402 } 2477 }
2403 } 2478 }
2404 2479
2405 if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
2406 ret = wl1271_sta_handle_idle(wl,
2407 conf->flags & IEEE80211_CONF_IDLE);
2408 if (ret < 0)
2409 wl1271_warning("idle mode change failed %d", ret);
2410 }
2411
2412 /* 2480 /*
2413 * if mac80211 changes the PSM mode, make sure the mode is not 2481 * if mac80211 changes the PSM mode, make sure the mode is not
2414 * incorrectly changed after the pspoll failure active window. 2482 * incorrectly changed after the pspoll failure active window.
2415 */ 2483 */
2416 if (changed & IEEE80211_CONF_CHANGE_PS) 2484 if (changed & IEEE80211_CONF_CHANGE_PS)
2417 clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags); 2485 clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
2418 2486
2419 if (conf->flags & IEEE80211_CONF_PS && 2487 if (conf->flags & IEEE80211_CONF_PS &&
2420 !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { 2488 !test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
2421 set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); 2489 set_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
2422 2490
2423 /* 2491 /*
2424 * We enter PSM only if we're already associated. 2492 * We enter PSM only if we're already associated.
2425 * If we're not, we'll enter it when joining an SSID, 2493 * If we're not, we'll enter it when joining an SSID,
2426 * through the bss_info_changed() hook. 2494 * through the bss_info_changed() hook.
2427 */ 2495 */
2428 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { 2496 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
2429 wl1271_debug(DEBUG_PSM, "psm enabled"); 2497 wl1271_debug(DEBUG_PSM, "psm enabled");
2430 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, 2498 ret = wl1271_ps_set_mode(wl, wlvif,
2431 wl->basic_rate, true); 2499 STATION_POWER_SAVE_MODE,
2500 wlvif->basic_rate, true);
2432 } 2501 }
2433 } else if (!(conf->flags & IEEE80211_CONF_PS) && 2502 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
2434 test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { 2503 test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
2435 wl1271_debug(DEBUG_PSM, "psm disabled"); 2504 wl1271_debug(DEBUG_PSM, "psm disabled");
2436 2505
2437 clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); 2506 clear_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
2438 2507
2439 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) 2508 if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags))
2440 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, 2509 ret = wl1271_ps_set_mode(wl, wlvif,
2441 wl->basic_rate, true); 2510 STATION_ACTIVE_MODE,
2511 wlvif->basic_rate, true);
2442 } 2512 }
2443 2513
2444 if (conf->power_level != wl->power_level) { 2514 if (conf->power_level != wlvif->power_level) {
2445 ret = wl1271_acx_tx_power(wl, conf->power_level); 2515 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2446 if (ret < 0) 2516 if (ret < 0)
2447 goto out_sleep; 2517 return ret;
2518
2519 wlvif->power_level = conf->power_level;
2520 }
2521
2522 return 0;
2523}
2524
2525static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2526{
2527 struct wl1271 *wl = hw->priv;
2528 struct wl12xx_vif *wlvif;
2529 struct ieee80211_conf *conf = &hw->conf;
2530 int channel, ret = 0;
2531
2532 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
2533
2534 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
2535 " changed 0x%x",
2536 channel,
2537 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2538 conf->power_level,
2539 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2540 changed);
2541
2542 /*
2543 * mac80211 will go to idle nearly immediately after transmitting some
2544 * frames, such as the deauth. To make sure those frames reach the air,
2545 * wait here until the TX queue is fully flushed.
2546 */
2547 if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
2548 (conf->flags & IEEE80211_CONF_IDLE))
2549 wl1271_tx_flush(wl);
2550
2551 mutex_lock(&wl->mutex);
2552
2553 /* we support configuring the channel and band even while off */
2554 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2555 wl->band = conf->channel->band;
2556 wl->channel = channel;
2557 }
2448 2558
2559 if (changed & IEEE80211_CONF_CHANGE_POWER)
2449 wl->power_level = conf->power_level; 2560 wl->power_level = conf->power_level;
2561
2562 if (unlikely(wl->state == WL1271_STATE_OFF))
2563 goto out;
2564
2565 ret = wl1271_ps_elp_wakeup(wl);
2566 if (ret < 0)
2567 goto out;
2568
2569 /* configure each interface */
2570 wl12xx_for_each_wlvif(wl, wlvif) {
2571 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2572 if (ret < 0)
2573 goto out_sleep;
2450 } 2574 }
2451 2575
2452out_sleep: 2576out_sleep:
@@ -2509,6 +2633,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2509{ 2633{
2510 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast; 2634 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2511 struct wl1271 *wl = hw->priv; 2635 struct wl1271 *wl = hw->priv;
2636 struct wl12xx_vif *wlvif;
2637
2512 int ret; 2638 int ret;
2513 2639
2514 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x" 2640 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
@@ -2526,15 +2652,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2526 if (ret < 0) 2652 if (ret < 0)
2527 goto out; 2653 goto out;
2528 2654
2529 if (wl->bss_type != BSS_TYPE_AP_BSS) { 2655 wl12xx_for_each_wlvif(wl, wlvif) {
2530 if (*total & FIF_ALLMULTI) 2656 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
2531 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0); 2657 if (*total & FIF_ALLMULTI)
2532 else if (fp) 2658 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2533 ret = wl1271_acx_group_address_tbl(wl, fp->enabled, 2659 false,
2534 fp->mc_list, 2660 NULL, 0);
2535 fp->mc_list_length); 2661 else if (fp)
2536 if (ret < 0) 2662 ret = wl1271_acx_group_address_tbl(wl, wlvif,
2537 goto out_sleep; 2663 fp->enabled,
2664 fp->mc_list,
2665 fp->mc_list_length);
2666 if (ret < 0)
2667 goto out_sleep;
2668 }
2538 } 2669 }
2539 2670
2540 /* 2671 /*
@@ -2551,9 +2682,10 @@ out:
2551 kfree(fp); 2682 kfree(fp);
2552} 2683}
2553 2684
2554static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type, 2685static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2555 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, 2686 u8 id, u8 key_type, u8 key_size,
2556 u16 tx_seq_16) 2687 const u8 *key, u8 hlid, u32 tx_seq_32,
2688 u16 tx_seq_16)
2557{ 2689{
2558 struct wl1271_ap_key *ap_key; 2690 struct wl1271_ap_key *ap_key;
2559 int i; 2691 int i;
@@ -2568,10 +2700,10 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
2568 * an existing key. 2700 * an existing key.
2569 */ 2701 */
2570 for (i = 0; i < MAX_NUM_KEYS; i++) { 2702 for (i = 0; i < MAX_NUM_KEYS; i++) {
2571 if (wl->recorded_ap_keys[i] == NULL) 2703 if (wlvif->ap.recorded_keys[i] == NULL)
2572 break; 2704 break;
2573 2705
2574 if (wl->recorded_ap_keys[i]->id == id) { 2706 if (wlvif->ap.recorded_keys[i]->id == id) {
2575 wl1271_warning("trying to record key replacement"); 2707 wl1271_warning("trying to record key replacement");
2576 return -EINVAL; 2708 return -EINVAL;
2577 } 2709 }
@@ -2592,21 +2724,21 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
2592 ap_key->tx_seq_32 = tx_seq_32; 2724 ap_key->tx_seq_32 = tx_seq_32;
2593 ap_key->tx_seq_16 = tx_seq_16; 2725 ap_key->tx_seq_16 = tx_seq_16;
2594 2726
2595 wl->recorded_ap_keys[i] = ap_key; 2727 wlvif->ap.recorded_keys[i] = ap_key;
2596 return 0; 2728 return 0;
2597} 2729}
2598 2730
2599static void wl1271_free_ap_keys(struct wl1271 *wl) 2731static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2600{ 2732{
2601 int i; 2733 int i;
2602 2734
2603 for (i = 0; i < MAX_NUM_KEYS; i++) { 2735 for (i = 0; i < MAX_NUM_KEYS; i++) {
2604 kfree(wl->recorded_ap_keys[i]); 2736 kfree(wlvif->ap.recorded_keys[i]);
2605 wl->recorded_ap_keys[i] = NULL; 2737 wlvif->ap.recorded_keys[i] = NULL;
2606 } 2738 }
2607} 2739}
2608 2740
2609static int wl1271_ap_init_hwenc(struct wl1271 *wl) 2741static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2610{ 2742{
2611 int i, ret = 0; 2743 int i, ret = 0;
2612 struct wl1271_ap_key *key; 2744 struct wl1271_ap_key *key;
@@ -2614,15 +2746,15 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
2614 2746
2615 for (i = 0; i < MAX_NUM_KEYS; i++) { 2747 for (i = 0; i < MAX_NUM_KEYS; i++) {
2616 u8 hlid; 2748 u8 hlid;
2617 if (wl->recorded_ap_keys[i] == NULL) 2749 if (wlvif->ap.recorded_keys[i] == NULL)
2618 break; 2750 break;
2619 2751
2620 key = wl->recorded_ap_keys[i]; 2752 key = wlvif->ap.recorded_keys[i];
2621 hlid = key->hlid; 2753 hlid = key->hlid;
2622 if (hlid == WL12XX_INVALID_LINK_ID) 2754 if (hlid == WL12XX_INVALID_LINK_ID)
2623 hlid = wl->ap_bcast_hlid; 2755 hlid = wlvif->ap.bcast_hlid;
2624 2756
2625 ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE, 2757 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2626 key->id, key->key_type, 2758 key->id, key->key_type,
2627 key->key_size, key->key, 2759 key->key_size, key->key,
2628 hlid, key->tx_seq_32, 2760 hlid, key->tx_seq_32,
@@ -2635,23 +2767,24 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
2635 } 2767 }
2636 2768
2637 if (wep_key_added) { 2769 if (wep_key_added) {
2638 ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key, 2770 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
2639 wl->ap_bcast_hlid); 2771 wlvif->ap.bcast_hlid);
2640 if (ret < 0) 2772 if (ret < 0)
2641 goto out; 2773 goto out;
2642 } 2774 }
2643 2775
2644out: 2776out:
2645 wl1271_free_ap_keys(wl); 2777 wl1271_free_ap_keys(wl, wlvif);
2646 return ret; 2778 return ret;
2647} 2779}
2648 2780
2649static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 2781static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2782 u16 action, u8 id, u8 key_type,
2650 u8 key_size, const u8 *key, u32 tx_seq_32, 2783 u8 key_size, const u8 *key, u32 tx_seq_32,
2651 u16 tx_seq_16, struct ieee80211_sta *sta) 2784 u16 tx_seq_16, struct ieee80211_sta *sta)
2652{ 2785{
2653 int ret; 2786 int ret;
2654 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); 2787 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2655 2788
2656 if (is_ap) { 2789 if (is_ap) {
2657 struct wl1271_station *wl_sta; 2790 struct wl1271_station *wl_sta;
@@ -2661,10 +2794,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
2661 wl_sta = (struct wl1271_station *)sta->drv_priv; 2794 wl_sta = (struct wl1271_station *)sta->drv_priv;
2662 hlid = wl_sta->hlid; 2795 hlid = wl_sta->hlid;
2663 } else { 2796 } else {
2664 hlid = wl->ap_bcast_hlid; 2797 hlid = wlvif->ap.bcast_hlid;
2665 } 2798 }
2666 2799
2667 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { 2800 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
2668 /* 2801 /*
2669 * We do not support removing keys after AP shutdown. 2802 * We do not support removing keys after AP shutdown.
2670 * Pretend we do to make mac80211 happy. 2803 * Pretend we do to make mac80211 happy.
@@ -2672,12 +2805,12 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
2672 if (action != KEY_ADD_OR_REPLACE) 2805 if (action != KEY_ADD_OR_REPLACE)
2673 return 0; 2806 return 0;
2674 2807
2675 ret = wl1271_record_ap_key(wl, id, 2808 ret = wl1271_record_ap_key(wl, wlvif, id,
2676 key_type, key_size, 2809 key_type, key_size,
2677 key, hlid, tx_seq_32, 2810 key, hlid, tx_seq_32,
2678 tx_seq_16); 2811 tx_seq_16);
2679 } else { 2812 } else {
2680 ret = wl1271_cmd_set_ap_key(wl, action, 2813 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
2681 id, key_type, key_size, 2814 id, key_type, key_size,
2682 key, hlid, tx_seq_32, 2815 key, hlid, tx_seq_32,
2683 tx_seq_16); 2816 tx_seq_16);
@@ -2718,10 +2851,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
2718 2851
2719 /* don't remove key if hlid was already deleted */ 2852 /* don't remove key if hlid was already deleted */
2720 if (action == KEY_REMOVE && 2853 if (action == KEY_REMOVE &&
2721 wl->sta_hlid == WL12XX_INVALID_LINK_ID) 2854 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
2722 return 0; 2855 return 0;
2723 2856
2724 ret = wl1271_cmd_set_sta_key(wl, action, 2857 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
2725 id, key_type, key_size, 2858 id, key_type, key_size,
2726 key, addr, tx_seq_32, 2859 key, addr, tx_seq_32,
2727 tx_seq_16); 2860 tx_seq_16);
@@ -2731,8 +2864,8 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
2731 /* the default WEP key needs to be configured at least once */ 2864 /* the default WEP key needs to be configured at least once */
2732 if (key_type == KEY_WEP) { 2865 if (key_type == KEY_WEP) {
2733 ret = wl12xx_cmd_set_default_wep_key(wl, 2866 ret = wl12xx_cmd_set_default_wep_key(wl,
2734 wl->default_key, 2867 wlvif->default_key,
2735 wl->sta_hlid); 2868 wlvif->sta.hlid);
2736 if (ret < 0) 2869 if (ret < 0)
2737 return ret; 2870 return ret;
2738 } 2871 }
@@ -2747,6 +2880,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2747 struct ieee80211_key_conf *key_conf) 2880 struct ieee80211_key_conf *key_conf)
2748{ 2881{
2749 struct wl1271 *wl = hw->priv; 2882 struct wl1271 *wl = hw->priv;
2883 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2750 int ret; 2884 int ret;
2751 u32 tx_seq_32 = 0; 2885 u32 tx_seq_32 = 0;
2752 u16 tx_seq_16 = 0; 2886 u16 tx_seq_16 = 0;
@@ -2782,20 +2916,20 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2782 key_type = KEY_TKIP; 2916 key_type = KEY_TKIP;
2783 2917
2784 key_conf->hw_key_idx = key_conf->keyidx; 2918 key_conf->hw_key_idx = key_conf->keyidx;
2785 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); 2919 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2786 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); 2920 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2787 break; 2921 break;
2788 case WLAN_CIPHER_SUITE_CCMP: 2922 case WLAN_CIPHER_SUITE_CCMP:
2789 key_type = KEY_AES; 2923 key_type = KEY_AES;
2790 2924
2791 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 2925 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2792 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); 2926 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2793 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); 2927 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2794 break; 2928 break;
2795 case WL1271_CIPHER_SUITE_GEM: 2929 case WL1271_CIPHER_SUITE_GEM:
2796 key_type = KEY_GEM; 2930 key_type = KEY_GEM;
2797 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); 2931 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
2798 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); 2932 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
2799 break; 2933 break;
2800 default: 2934 default:
2801 wl1271_error("Unknown key algo 0x%x", key_conf->cipher); 2935 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -2806,7 +2940,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2806 2940
2807 switch (cmd) { 2941 switch (cmd) {
2808 case SET_KEY: 2942 case SET_KEY:
2809 ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE, 2943 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
2810 key_conf->keyidx, key_type, 2944 key_conf->keyidx, key_type,
2811 key_conf->keylen, key_conf->key, 2945 key_conf->keylen, key_conf->key,
2812 tx_seq_32, tx_seq_16, sta); 2946 tx_seq_32, tx_seq_16, sta);
@@ -2817,7 +2951,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2817 break; 2951 break;
2818 2952
2819 case DISABLE_KEY: 2953 case DISABLE_KEY:
2820 ret = wl1271_set_key(wl, KEY_REMOVE, 2954 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
2821 key_conf->keyidx, key_type, 2955 key_conf->keyidx, key_type,
2822 key_conf->keylen, key_conf->key, 2956 key_conf->keylen, key_conf->key,
2823 0, 0, sta); 2957 0, 0, sta);
@@ -2847,6 +2981,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
2847 struct cfg80211_scan_request *req) 2981 struct cfg80211_scan_request *req)
2848{ 2982{
2849 struct wl1271 *wl = hw->priv; 2983 struct wl1271 *wl = hw->priv;
2984 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2985
2850 int ret; 2986 int ret;
2851 u8 *ssid = NULL; 2987 u8 *ssid = NULL;
2852 size_t len = 0; 2988 size_t len = 0;
@@ -2876,16 +3012,15 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
2876 3012
2877 /* cancel ROC before scanning */ 3013 /* cancel ROC before scanning */
2878 if (wl12xx_is_roc(wl)) { 3014 if (wl12xx_is_roc(wl)) {
2879 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { 3015 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
2880 /* don't allow scanning right now */ 3016 /* don't allow scanning right now */
2881 ret = -EBUSY; 3017 ret = -EBUSY;
2882 goto out_sleep; 3018 goto out_sleep;
2883 } 3019 }
2884 wl12xx_croc(wl, wl->dev_role_id); 3020 wl12xx_stop_dev(wl, wlvif);
2885 wl12xx_cmd_role_stop_dev(wl);
2886 } 3021 }
2887 3022
2888 ret = wl1271_scan(hw->priv, ssid, len, req); 3023 ret = wl1271_scan(hw->priv, vif, ssid, len, req);
2889out_sleep: 3024out_sleep:
2890 wl1271_ps_elp_sleep(wl); 3025 wl1271_ps_elp_sleep(wl);
2891out: 3026out:
@@ -2921,6 +3056,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
2921 } 3056 }
2922 wl->scan.state = WL1271_SCAN_STATE_IDLE; 3057 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2923 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); 3058 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3059 wl->scan_vif = NULL;
2924 wl->scan.req = NULL; 3060 wl->scan.req = NULL;
2925 ieee80211_scan_completed(wl->hw, true); 3061 ieee80211_scan_completed(wl->hw, true);
2926 3062
@@ -2938,6 +3074,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
2938 struct ieee80211_sched_scan_ies *ies) 3074 struct ieee80211_sched_scan_ies *ies)
2939{ 3075{
2940 struct wl1271 *wl = hw->priv; 3076 struct wl1271 *wl = hw->priv;
3077 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2941 int ret; 3078 int ret;
2942 3079
2943 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start"); 3080 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
@@ -2948,11 +3085,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
2948 if (ret < 0) 3085 if (ret < 0)
2949 goto out; 3086 goto out;
2950 3087
2951 ret = wl1271_scan_sched_scan_config(wl, req, ies); 3088 ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
2952 if (ret < 0) 3089 if (ret < 0)
2953 goto out_sleep; 3090 goto out_sleep;
2954 3091
2955 ret = wl1271_scan_sched_scan_start(wl); 3092 ret = wl1271_scan_sched_scan_start(wl, wlvif);
2956 if (ret < 0) 3093 if (ret < 0)
2957 goto out_sleep; 3094 goto out_sleep;
2958 3095
@@ -3017,6 +3154,7 @@ out:
3017static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 3154static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3018{ 3155{
3019 struct wl1271 *wl = hw->priv; 3156 struct wl1271 *wl = hw->priv;
3157 struct wl12xx_vif *wlvif;
3020 int ret = 0; 3158 int ret = 0;
3021 3159
3022 mutex_lock(&wl->mutex); 3160 mutex_lock(&wl->mutex);
@@ -3030,10 +3168,11 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3030 if (ret < 0) 3168 if (ret < 0)
3031 goto out; 3169 goto out;
3032 3170
3033 ret = wl1271_acx_rts_threshold(wl, value); 3171 wl12xx_for_each_wlvif(wl, wlvif) {
3034 if (ret < 0) 3172 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3035 wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret); 3173 if (ret < 0)
3036 3174 wl1271_warning("set rts threshold failed: %d", ret);
3175 }
3037 wl1271_ps_elp_sleep(wl); 3176 wl1271_ps_elp_sleep(wl);
3038 3177
3039out: 3178out:
@@ -3042,9 +3181,10 @@ out:
3042 return ret; 3181 return ret;
3043} 3182}
3044 3183
3045static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb, 3184static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
3046 int offset) 3185 int offset)
3047{ 3186{
3187 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3048 u8 ssid_len; 3188 u8 ssid_len;
3049 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset, 3189 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
3050 skb->len - offset); 3190 skb->len - offset);
@@ -3060,8 +3200,8 @@ static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
3060 return -EINVAL; 3200 return -EINVAL;
3061 } 3201 }
3062 3202
3063 wl->ssid_len = ssid_len; 3203 wlvif->ssid_len = ssid_len;
3064 memcpy(wl->ssid, ptr+2, ssid_len); 3204 memcpy(wlvif->ssid, ptr+2, ssid_len);
3065 return 0; 3205 return 0;
3066} 3206}
3067 3207
@@ -3096,18 +3236,40 @@ static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3096 skb_trim(skb, skb->len - len); 3236 skb_trim(skb, skb->len - len);
3097} 3237}
3098 3238
3099static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, 3239static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3100 u8 *probe_rsp_data, 3240 struct ieee80211_vif *vif)
3101 size_t probe_rsp_len,
3102 u32 rates)
3103{ 3241{
3104 struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf; 3242 struct sk_buff *skb;
3243 int ret;
3244
3245 skb = ieee80211_proberesp_get(wl->hw, vif);
3246 if (!skb)
3247 return -EOPNOTSUPP;
3248
3249 ret = wl1271_cmd_template_set(wl,
3250 CMD_TEMPL_AP_PROBE_RESPONSE,
3251 skb->data,
3252 skb->len, 0,
3253 rates);
3254
3255 dev_kfree_skb(skb);
3256 return ret;
3257}
3258
3259static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3260 struct ieee80211_vif *vif,
3261 u8 *probe_rsp_data,
3262 size_t probe_rsp_len,
3263 u32 rates)
3264{
3265 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3266 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3105 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE]; 3267 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3106 int ssid_ie_offset, ie_offset, templ_len; 3268 int ssid_ie_offset, ie_offset, templ_len;
3107 const u8 *ptr; 3269 const u8 *ptr;
3108 3270
3109 /* no need to change probe response if the SSID is set correctly */ 3271 /* no need to change probe response if the SSID is set correctly */
3110 if (wl->ssid_len > 0) 3272 if (wlvif->ssid_len > 0)
3111 return wl1271_cmd_template_set(wl, 3273 return wl1271_cmd_template_set(wl,
3112 CMD_TEMPL_AP_PROBE_RESPONSE, 3274 CMD_TEMPL_AP_PROBE_RESPONSE,
3113 probe_rsp_data, 3275 probe_rsp_data,
@@ -3153,16 +3315,18 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
3153} 3315}
3154 3316
3155static int wl1271_bss_erp_info_changed(struct wl1271 *wl, 3317static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3318 struct ieee80211_vif *vif,
3156 struct ieee80211_bss_conf *bss_conf, 3319 struct ieee80211_bss_conf *bss_conf,
3157 u32 changed) 3320 u32 changed)
3158{ 3321{
3322 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3159 int ret = 0; 3323 int ret = 0;
3160 3324
3161 if (changed & BSS_CHANGED_ERP_SLOT) { 3325 if (changed & BSS_CHANGED_ERP_SLOT) {
3162 if (bss_conf->use_short_slot) 3326 if (bss_conf->use_short_slot)
3163 ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT); 3327 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3164 else 3328 else
3165 ret = wl1271_acx_slot(wl, SLOT_TIME_LONG); 3329 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3166 if (ret < 0) { 3330 if (ret < 0) {
3167 wl1271_warning("Set slot time failed %d", ret); 3331 wl1271_warning("Set slot time failed %d", ret);
3168 goto out; 3332 goto out;
@@ -3171,16 +3335,18 @@ static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3171 3335
3172 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 3336 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3173 if (bss_conf->use_short_preamble) 3337 if (bss_conf->use_short_preamble)
3174 wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT); 3338 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3175 else 3339 else
3176 wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG); 3340 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3177 } 3341 }
3178 3342
3179 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 3343 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3180 if (bss_conf->use_cts_prot) 3344 if (bss_conf->use_cts_prot)
3181 ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE); 3345 ret = wl1271_acx_cts_protect(wl, wlvif,
3346 CTSPROTECT_ENABLE);
3182 else 3347 else
3183 ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE); 3348 ret = wl1271_acx_cts_protect(wl, wlvif,
3349 CTSPROTECT_DISABLE);
3184 if (ret < 0) { 3350 if (ret < 0) {
3185 wl1271_warning("Set ctsprotect failed %d", ret); 3351 wl1271_warning("Set ctsprotect failed %d", ret);
3186 goto out; 3352 goto out;
@@ -3196,14 +3362,23 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3196 struct ieee80211_bss_conf *bss_conf, 3362 struct ieee80211_bss_conf *bss_conf,
3197 u32 changed) 3363 u32 changed)
3198{ 3364{
3199 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); 3365 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3366 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3200 int ret = 0; 3367 int ret = 0;
3201 3368
3202 if ((changed & BSS_CHANGED_BEACON_INT)) { 3369 if ((changed & BSS_CHANGED_BEACON_INT)) {
3203 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d", 3370 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3204 bss_conf->beacon_int); 3371 bss_conf->beacon_int);
3205 3372
3206 wl->beacon_int = bss_conf->beacon_int; 3373 wlvif->beacon_int = bss_conf->beacon_int;
3374 }
3375
3376 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3377 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3378 if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
3379 wl1271_debug(DEBUG_AP, "probe response updated");
3380 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3381 }
3207 } 3382 }
3208 3383
3209 if ((changed & BSS_CHANGED_BEACON)) { 3384 if ((changed & BSS_CHANGED_BEACON)) {
@@ -3214,17 +3389,19 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3214 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif); 3389 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3215 u16 tmpl_id; 3390 u16 tmpl_id;
3216 3391
3217 if (!beacon) 3392 if (!beacon) {
3393 ret = -EINVAL;
3218 goto out; 3394 goto out;
3395 }
3219 3396
3220 wl1271_debug(DEBUG_MASTER, "beacon updated"); 3397 wl1271_debug(DEBUG_MASTER, "beacon updated");
3221 3398
3222 ret = wl1271_ssid_set(wl, beacon, ieoffset); 3399 ret = wl1271_ssid_set(vif, beacon, ieoffset);
3223 if (ret < 0) { 3400 if (ret < 0) {
3224 dev_kfree_skb(beacon); 3401 dev_kfree_skb(beacon);
3225 goto out; 3402 goto out;
3226 } 3403 }
3227 min_rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set); 3404 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3228 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON : 3405 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3229 CMD_TEMPL_BEACON; 3406 CMD_TEMPL_BEACON;
3230 ret = wl1271_cmd_template_set(wl, tmpl_id, 3407 ret = wl1271_cmd_template_set(wl, tmpl_id,
@@ -3236,6 +3413,13 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3236 goto out; 3413 goto out;
3237 } 3414 }
3238 3415
3416 /*
3417 * In case we already have a probe-resp beacon set explicitly
3418 * by usermode, don't use the beacon data.
3419 */
3420 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3421 goto end_bcn;
3422
3239 /* remove TIM ie from probe response */ 3423 /* remove TIM ie from probe response */
3240 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset); 3424 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3241 3425
@@ -3254,7 +3438,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3254 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 3438 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3255 IEEE80211_STYPE_PROBE_RESP); 3439 IEEE80211_STYPE_PROBE_RESP);
3256 if (is_ap) 3440 if (is_ap)
3257 ret = wl1271_ap_set_probe_resp_tmpl(wl, 3441 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3258 beacon->data, 3442 beacon->data,
3259 beacon->len, 3443 beacon->len,
3260 min_rate); 3444 min_rate);
@@ -3264,12 +3448,15 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3264 beacon->data, 3448 beacon->data,
3265 beacon->len, 0, 3449 beacon->len, 0,
3266 min_rate); 3450 min_rate);
3451end_bcn:
3267 dev_kfree_skb(beacon); 3452 dev_kfree_skb(beacon);
3268 if (ret < 0) 3453 if (ret < 0)
3269 goto out; 3454 goto out;
3270 } 3455 }
3271 3456
3272out: 3457out:
3458 if (ret != 0)
3459 wl1271_error("beacon info change failed: %d", ret);
3273 return ret; 3460 return ret;
3274} 3461}
3275 3462
@@ -3279,23 +3466,24 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3279 struct ieee80211_bss_conf *bss_conf, 3466 struct ieee80211_bss_conf *bss_conf,
3280 u32 changed) 3467 u32 changed)
3281{ 3468{
3469 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3282 int ret = 0; 3470 int ret = 0;
3283 3471
3284 if ((changed & BSS_CHANGED_BASIC_RATES)) { 3472 if ((changed & BSS_CHANGED_BASIC_RATES)) {
3285 u32 rates = bss_conf->basic_rates; 3473 u32 rates = bss_conf->basic_rates;
3286 3474
3287 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, 3475 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3288 wl->band); 3476 wlvif->band);
3289 wl->basic_rate = wl1271_tx_min_rate_get(wl, 3477 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3290 wl->basic_rate_set); 3478 wlvif->basic_rate_set);
3291 3479
3292 ret = wl1271_init_ap_rates(wl); 3480 ret = wl1271_init_ap_rates(wl, wlvif);
3293 if (ret < 0) { 3481 if (ret < 0) {
3294 wl1271_error("AP rate policy change failed %d", ret); 3482 wl1271_error("AP rate policy change failed %d", ret);
3295 goto out; 3483 goto out;
3296 } 3484 }
3297 3485
3298 ret = wl1271_ap_init_templates(wl); 3486 ret = wl1271_ap_init_templates(wl, vif);
3299 if (ret < 0) 3487 if (ret < 0)
3300 goto out; 3488 goto out;
3301 } 3489 }
@@ -3306,38 +3494,40 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3306 3494
3307 if ((changed & BSS_CHANGED_BEACON_ENABLED)) { 3495 if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
3308 if (bss_conf->enable_beacon) { 3496 if (bss_conf->enable_beacon) {
3309 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { 3497 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3310 ret = wl12xx_cmd_role_start_ap(wl); 3498 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3311 if (ret < 0) 3499 if (ret < 0)
3312 goto out; 3500 goto out;
3313 3501
3314 ret = wl1271_ap_init_hwenc(wl); 3502 ret = wl1271_ap_init_hwenc(wl, wlvif);
3315 if (ret < 0) 3503 if (ret < 0)
3316 goto out; 3504 goto out;
3317 3505
3318 set_bit(WL1271_FLAG_AP_STARTED, &wl->flags); 3506 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3319 wl1271_debug(DEBUG_AP, "started AP"); 3507 wl1271_debug(DEBUG_AP, "started AP");
3320 } 3508 }
3321 } else { 3509 } else {
3322 if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { 3510 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3323 ret = wl12xx_cmd_role_stop_ap(wl); 3511 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3324 if (ret < 0) 3512 if (ret < 0)
3325 goto out; 3513 goto out;
3326 3514
3327 clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags); 3515 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3516 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3517 &wlvif->flags);
3328 wl1271_debug(DEBUG_AP, "stopped AP"); 3518 wl1271_debug(DEBUG_AP, "stopped AP");
3329 } 3519 }
3330 } 3520 }
3331 } 3521 }
3332 3522
3333 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed); 3523 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3334 if (ret < 0) 3524 if (ret < 0)
3335 goto out; 3525 goto out;
3336 3526
3337 /* Handle HT information change */ 3527 /* Handle HT information change */
3338 if ((changed & BSS_CHANGED_HT) && 3528 if ((changed & BSS_CHANGED_HT) &&
3339 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { 3529 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3340 ret = wl1271_acx_set_ht_information(wl, 3530 ret = wl1271_acx_set_ht_information(wl, wlvif,
3341 bss_conf->ht_operation_mode); 3531 bss_conf->ht_operation_mode);
3342 if (ret < 0) { 3532 if (ret < 0) {
3343 wl1271_warning("Set ht information failed %d", ret); 3533 wl1271_warning("Set ht information failed %d", ret);
@@ -3355,8 +3545,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3355 struct ieee80211_bss_conf *bss_conf, 3545 struct ieee80211_bss_conf *bss_conf,
3356 u32 changed) 3546 u32 changed)
3357{ 3547{
3548 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3358 bool do_join = false, set_assoc = false; 3549 bool do_join = false, set_assoc = false;
3359 bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS); 3550 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
3360 bool ibss_joined = false; 3551 bool ibss_joined = false;
3361 u32 sta_rate_set = 0; 3552 u32 sta_rate_set = 0;
3362 int ret; 3553 int ret;
@@ -3373,14 +3564,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3373 3564
3374 if (changed & BSS_CHANGED_IBSS) { 3565 if (changed & BSS_CHANGED_IBSS) {
3375 if (bss_conf->ibss_joined) { 3566 if (bss_conf->ibss_joined) {
3376 set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags); 3567 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
3377 ibss_joined = true; 3568 ibss_joined = true;
3378 } else { 3569 } else {
3379 if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED, 3570 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
3380 &wl->flags)) { 3571 &wlvif->flags)) {
3381 wl1271_unjoin(wl); 3572 wl1271_unjoin(wl, wlvif);
3382 wl12xx_cmd_role_start_dev(wl); 3573 wl12xx_start_dev(wl, wlvif);
3383 wl12xx_roc(wl, wl->dev_role_id);
3384 } 3574 }
3385 } 3575 }
3386 } 3576 }
@@ -3396,46 +3586,40 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3396 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s", 3586 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3397 bss_conf->enable_beacon ? "enabled" : "disabled"); 3587 bss_conf->enable_beacon ? "enabled" : "disabled");
3398 3588
3399 if (bss_conf->enable_beacon)
3400 wl->set_bss_type = BSS_TYPE_IBSS;
3401 else
3402 wl->set_bss_type = BSS_TYPE_STA_BSS;
3403 do_join = true; 3589 do_join = true;
3404 } 3590 }
3405 3591
3592 if (changed & BSS_CHANGED_IDLE) {
3593 ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
3594 if (ret < 0)
3595 wl1271_warning("idle mode change failed %d", ret);
3596 }
3597
3406 if ((changed & BSS_CHANGED_CQM)) { 3598 if ((changed & BSS_CHANGED_CQM)) {
3407 bool enable = false; 3599 bool enable = false;
3408 if (bss_conf->cqm_rssi_thold) 3600 if (bss_conf->cqm_rssi_thold)
3409 enable = true; 3601 enable = true;
3410 ret = wl1271_acx_rssi_snr_trigger(wl, enable, 3602 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
3411 bss_conf->cqm_rssi_thold, 3603 bss_conf->cqm_rssi_thold,
3412 bss_conf->cqm_rssi_hyst); 3604 bss_conf->cqm_rssi_hyst);
3413 if (ret < 0) 3605 if (ret < 0)
3414 goto out; 3606 goto out;
3415 wl->rssi_thold = bss_conf->cqm_rssi_thold; 3607 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
3416 } 3608 }
3417 3609
3418 if ((changed & BSS_CHANGED_BSSID) && 3610 if (changed & BSS_CHANGED_BSSID)
3419 /* 3611 if (!is_zero_ether_addr(bss_conf->bssid)) {
3420 * Now we know the correct bssid, so we send a new join command 3612 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3421 * and enable the BSSID filter
3422 */
3423 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
3424 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
3425
3426 if (!is_zero_ether_addr(wl->bssid)) {
3427 ret = wl1271_cmd_build_null_data(wl);
3428 if (ret < 0) 3613 if (ret < 0)
3429 goto out; 3614 goto out;
3430 3615
3431 ret = wl1271_build_qos_null_data(wl); 3616 ret = wl1271_build_qos_null_data(wl, vif);
3432 if (ret < 0) 3617 if (ret < 0)
3433 goto out; 3618 goto out;
3434 3619
3435 /* Need to update the BSSID (for filtering etc) */ 3620 /* Need to update the BSSID (for filtering etc) */
3436 do_join = true; 3621 do_join = true;
3437 } 3622 }
3438 }
3439 3623
3440 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) { 3624 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
3441 rcu_read_lock(); 3625 rcu_read_lock();
@@ -3459,26 +3643,28 @@ sta_not_found:
3459 if (bss_conf->assoc) { 3643 if (bss_conf->assoc) {
3460 u32 rates; 3644 u32 rates;
3461 int ieoffset; 3645 int ieoffset;
3462 wl->aid = bss_conf->aid; 3646 wlvif->aid = bss_conf->aid;
3463 set_assoc = true; 3647 set_assoc = true;
3464 3648
3465 wl->ps_poll_failures = 0; 3649 wlvif->ps_poll_failures = 0;
3466 3650
3467 /* 3651 /*
3468 * use basic rates from AP, and determine lowest rate 3652 * use basic rates from AP, and determine lowest rate
3469 * to use with control frames. 3653 * to use with control frames.
3470 */ 3654 */
3471 rates = bss_conf->basic_rates; 3655 rates = bss_conf->basic_rates;
3472 wl->basic_rate_set = 3656 wlvif->basic_rate_set =
3473 wl1271_tx_enabled_rates_get(wl, rates, 3657 wl1271_tx_enabled_rates_get(wl, rates,
3474 wl->band); 3658 wlvif->band);
3475 wl->basic_rate = 3659 wlvif->basic_rate =
3476 wl1271_tx_min_rate_get(wl, wl->basic_rate_set); 3660 wl1271_tx_min_rate_get(wl,
3661 wlvif->basic_rate_set);
3477 if (sta_rate_set) 3662 if (sta_rate_set)
3478 wl->rate_set = wl1271_tx_enabled_rates_get(wl, 3663 wlvif->rate_set =
3664 wl1271_tx_enabled_rates_get(wl,
3479 sta_rate_set, 3665 sta_rate_set,
3480 wl->band); 3666 wlvif->band);
3481 ret = wl1271_acx_sta_rate_policies(wl); 3667 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3482 if (ret < 0) 3668 if (ret < 0)
3483 goto out; 3669 goto out;
3484 3670
@@ -3488,53 +3674,56 @@ sta_not_found:
3488 * updates it by itself when the first beacon is 3674 * updates it by itself when the first beacon is
3489 * received after a join. 3675 * received after a join.
3490 */ 3676 */
3491 ret = wl1271_cmd_build_ps_poll(wl, wl->aid); 3677 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
3492 if (ret < 0) 3678 if (ret < 0)
3493 goto out; 3679 goto out;
3494 3680
3495 /* 3681 /*
3496 * Get a template for hardware connection maintenance 3682 * Get a template for hardware connection maintenance
3497 */ 3683 */
3498 dev_kfree_skb(wl->probereq); 3684 dev_kfree_skb(wlvif->probereq);
3499 wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL); 3685 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
3686 wlvif,
3687 NULL);
3500 ieoffset = offsetof(struct ieee80211_mgmt, 3688 ieoffset = offsetof(struct ieee80211_mgmt,
3501 u.probe_req.variable); 3689 u.probe_req.variable);
3502 wl1271_ssid_set(wl, wl->probereq, ieoffset); 3690 wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
3503 3691
3504 /* enable the connection monitoring feature */ 3692 /* enable the connection monitoring feature */
3505 ret = wl1271_acx_conn_monit_params(wl, true); 3693 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
3506 if (ret < 0) 3694 if (ret < 0)
3507 goto out; 3695 goto out;
3508 } else { 3696 } else {
3509 /* use defaults when not associated */ 3697 /* use defaults when not associated */
3510 bool was_assoc = 3698 bool was_assoc =
3511 !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED, 3699 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
3512 &wl->flags); 3700 &wlvif->flags);
3513 bool was_ifup = 3701 bool was_ifup =
3514 !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT, 3702 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
3515 &wl->flags); 3703 &wlvif->flags);
3516 wl->aid = 0; 3704 wlvif->aid = 0;
3517 3705
3518 /* free probe-request template */ 3706 /* free probe-request template */
3519 dev_kfree_skb(wl->probereq); 3707 dev_kfree_skb(wlvif->probereq);
3520 wl->probereq = NULL; 3708 wlvif->probereq = NULL;
3521 3709
3522 /* re-enable dynamic ps - just in case */ 3710 /* re-enable dynamic ps - just in case */
3523 ieee80211_enable_dyn_ps(wl->vif); 3711 ieee80211_enable_dyn_ps(vif);
3524 3712
3525 /* revert back to minimum rates for the current band */ 3713 /* revert back to minimum rates for the current band */
3526 wl1271_set_band_rate(wl); 3714 wl1271_set_band_rate(wl, wlvif);
3527 wl->basic_rate = 3715 wlvif->basic_rate =
3528 wl1271_tx_min_rate_get(wl, wl->basic_rate_set); 3716 wl1271_tx_min_rate_get(wl,
3529 ret = wl1271_acx_sta_rate_policies(wl); 3717 wlvif->basic_rate_set);
3718 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3530 if (ret < 0) 3719 if (ret < 0)
3531 goto out; 3720 goto out;
3532 3721
3533 /* disable connection monitor features */ 3722 /* disable connection monitor features */
3534 ret = wl1271_acx_conn_monit_params(wl, false); 3723 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3535 3724
3536 /* Disable the keep-alive feature */ 3725 /* Disable the keep-alive feature */
3537 ret = wl1271_acx_keep_alive_mode(wl, false); 3726 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3538 if (ret < 0) 3727 if (ret < 0)
3539 goto out; 3728 goto out;
3540 3729
@@ -3546,7 +3735,7 @@ sta_not_found:
3546 * no IF_OPER_UP notification. 3735 * no IF_OPER_UP notification.
3547 */ 3736 */
3548 if (!was_ifup) { 3737 if (!was_ifup) {
3549 ret = wl12xx_croc(wl, wl->role_id); 3738 ret = wl12xx_croc(wl, wlvif->role_id);
3550 if (ret < 0) 3739 if (ret < 0)
3551 goto out; 3740 goto out;
3552 } 3741 }
@@ -3555,17 +3744,16 @@ sta_not_found:
3555 * roaming on the same channel. until we will 3744 * roaming on the same channel. until we will
3556 * have a better flow...) 3745 * have a better flow...)
3557 */ 3746 */
3558 if (test_bit(wl->dev_role_id, wl->roc_map)) { 3747 if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
3559 ret = wl12xx_croc(wl, wl->dev_role_id); 3748 ret = wl12xx_croc(wl,
3749 wlvif->dev_role_id);
3560 if (ret < 0) 3750 if (ret < 0)
3561 goto out; 3751 goto out;
3562 } 3752 }
3563 3753
3564 wl1271_unjoin(wl); 3754 wl1271_unjoin(wl, wlvif);
3565 if (!(conf_flags & IEEE80211_CONF_IDLE)) { 3755 if (!(conf_flags & IEEE80211_CONF_IDLE))
3566 wl12xx_cmd_role_start_dev(wl); 3756 wl12xx_start_dev(wl, wlvif);
3567 wl12xx_roc(wl, wl->dev_role_id);
3568 }
3569 } 3757 }
3570 } 3758 }
3571 } 3759 }
@@ -3576,27 +3764,28 @@ sta_not_found:
3576 3764
3577 if (bss_conf->ibss_joined) { 3765 if (bss_conf->ibss_joined) {
3578 u32 rates = bss_conf->basic_rates; 3766 u32 rates = bss_conf->basic_rates;
3579 wl->basic_rate_set = 3767 wlvif->basic_rate_set =
3580 wl1271_tx_enabled_rates_get(wl, rates, 3768 wl1271_tx_enabled_rates_get(wl, rates,
3581 wl->band); 3769 wlvif->band);
3582 wl->basic_rate = 3770 wlvif->basic_rate =
3583 wl1271_tx_min_rate_get(wl, wl->basic_rate_set); 3771 wl1271_tx_min_rate_get(wl,
3772 wlvif->basic_rate_set);
3584 3773
3585 /* by default, use 11b + OFDM rates */ 3774 /* by default, use 11b + OFDM rates */
3586 wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES; 3775 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
3587 ret = wl1271_acx_sta_rate_policies(wl); 3776 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3588 if (ret < 0) 3777 if (ret < 0)
3589 goto out; 3778 goto out;
3590 } 3779 }
3591 } 3780 }
3592 3781
3593 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed); 3782 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3594 if (ret < 0) 3783 if (ret < 0)
3595 goto out; 3784 goto out;
3596 3785
3597 if (changed & BSS_CHANGED_ARP_FILTER) { 3786 if (changed & BSS_CHANGED_ARP_FILTER) {
3598 __be32 addr = bss_conf->arp_addr_list[0]; 3787 __be32 addr = bss_conf->arp_addr_list[0];
3599 WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); 3788 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
3600 3789
3601 if (bss_conf->arp_addr_cnt == 1 && 3790 if (bss_conf->arp_addr_cnt == 1 &&
3602 bss_conf->arp_filter_enabled) { 3791 bss_conf->arp_filter_enabled) {
@@ -3606,24 +3795,24 @@ sta_not_found:
3606 * isn't being set (when sending), so we have to 3795 * isn't being set (when sending), so we have to
3607 * reconfigure the template upon every ip change. 3796 * reconfigure the template upon every ip change.
3608 */ 3797 */
3609 ret = wl1271_cmd_build_arp_rsp(wl, addr); 3798 ret = wl1271_cmd_build_arp_rsp(wl, wlvif, addr);
3610 if (ret < 0) { 3799 if (ret < 0) {
3611 wl1271_warning("build arp rsp failed: %d", ret); 3800 wl1271_warning("build arp rsp failed: %d", ret);
3612 goto out; 3801 goto out;
3613 } 3802 }
3614 3803
3615 ret = wl1271_acx_arp_ip_filter(wl, 3804 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
3616 ACX_ARP_FILTER_ARP_FILTERING, 3805 ACX_ARP_FILTER_ARP_FILTERING,
3617 addr); 3806 addr);
3618 } else 3807 } else
3619 ret = wl1271_acx_arp_ip_filter(wl, 0, addr); 3808 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
3620 3809
3621 if (ret < 0) 3810 if (ret < 0)
3622 goto out; 3811 goto out;
3623 } 3812 }
3624 3813
3625 if (do_join) { 3814 if (do_join) {
3626 ret = wl1271_join(wl, set_assoc); 3815 ret = wl1271_join(wl, wlvif, set_assoc);
3627 if (ret < 0) { 3816 if (ret < 0) {
3628 wl1271_warning("cmd join failed %d", ret); 3817 wl1271_warning("cmd join failed %d", ret);
3629 goto out; 3818 goto out;
@@ -3631,35 +3820,31 @@ sta_not_found:
3631 3820
3632 /* ROC until connected (after EAPOL exchange) */ 3821 /* ROC until connected (after EAPOL exchange) */
3633 if (!is_ibss) { 3822 if (!is_ibss) {
3634 ret = wl12xx_roc(wl, wl->role_id); 3823 ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
3635 if (ret < 0) 3824 if (ret < 0)
3636 goto out; 3825 goto out;
3637 3826
3638 wl1271_check_operstate(wl, 3827 wl1271_check_operstate(wl, wlvif,
3639 ieee80211_get_operstate(vif)); 3828 ieee80211_get_operstate(vif));
3640 } 3829 }
3641 /* 3830 /*
3642 * stop device role if started (we might already be in 3831 * stop device role if started (we might already be in
3643 * STA role). TODO: make it better. 3832 * STA role). TODO: make it better.
3644 */ 3833 */
3645 if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) { 3834 if (wlvif->dev_role_id != WL12XX_INVALID_ROLE_ID) {
3646 ret = wl12xx_croc(wl, wl->dev_role_id); 3835 ret = wl12xx_stop_dev(wl, wlvif);
3647 if (ret < 0)
3648 goto out;
3649
3650 ret = wl12xx_cmd_role_stop_dev(wl);
3651 if (ret < 0) 3836 if (ret < 0)
3652 goto out; 3837 goto out;
3653 } 3838 }
3654 3839
3655 /* If we want to go in PSM but we're not there yet */ 3840 /* If we want to go in PSM but we're not there yet */
3656 if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) && 3841 if (test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags) &&
3657 !test_bit(WL1271_FLAG_PSM, &wl->flags)) { 3842 !test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
3658 enum wl1271_cmd_ps_mode mode; 3843 enum wl1271_cmd_ps_mode mode;
3659 3844
3660 mode = STATION_POWER_SAVE_MODE; 3845 mode = STATION_POWER_SAVE_MODE;
3661 ret = wl1271_ps_set_mode(wl, mode, 3846 ret = wl1271_ps_set_mode(wl, wlvif, mode,
3662 wl->basic_rate, 3847 wlvif->basic_rate,
3663 true); 3848 true);
3664 if (ret < 0) 3849 if (ret < 0)
3665 goto out; 3850 goto out;
@@ -3673,7 +3858,7 @@ sta_not_found:
3673 ret = wl1271_acx_set_ht_capabilities(wl, 3858 ret = wl1271_acx_set_ht_capabilities(wl,
3674 &sta_ht_cap, 3859 &sta_ht_cap,
3675 true, 3860 true,
3676 wl->sta_hlid); 3861 wlvif->sta.hlid);
3677 if (ret < 0) { 3862 if (ret < 0) {
3678 wl1271_warning("Set ht cap true failed %d", 3863 wl1271_warning("Set ht cap true failed %d",
3679 ret); 3864 ret);
@@ -3685,7 +3870,7 @@ sta_not_found:
3685 ret = wl1271_acx_set_ht_capabilities(wl, 3870 ret = wl1271_acx_set_ht_capabilities(wl,
3686 &sta_ht_cap, 3871 &sta_ht_cap,
3687 false, 3872 false,
3688 wl->sta_hlid); 3873 wlvif->sta.hlid);
3689 if (ret < 0) { 3874 if (ret < 0) {
3690 wl1271_warning("Set ht cap false failed %d", 3875 wl1271_warning("Set ht cap false failed %d",
3691 ret); 3876 ret);
@@ -3697,7 +3882,7 @@ sta_not_found:
3697 /* Handle HT information change. Done after join. */ 3882 /* Handle HT information change. Done after join. */
3698 if ((changed & BSS_CHANGED_HT) && 3883 if ((changed & BSS_CHANGED_HT) &&
3699 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { 3884 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3700 ret = wl1271_acx_set_ht_information(wl, 3885 ret = wl1271_acx_set_ht_information(wl, wlvif,
3701 bss_conf->ht_operation_mode); 3886 bss_conf->ht_operation_mode);
3702 if (ret < 0) { 3887 if (ret < 0) {
3703 wl1271_warning("Set ht information failed %d", ret); 3888 wl1271_warning("Set ht information failed %d", ret);
@@ -3715,7 +3900,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
3715 u32 changed) 3900 u32 changed)
3716{ 3901{
3717 struct wl1271 *wl = hw->priv; 3902 struct wl1271 *wl = hw->priv;
3718 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); 3903 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3904 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3719 int ret; 3905 int ret;
3720 3906
3721 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x", 3907 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
@@ -3726,6 +3912,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
3726 if (unlikely(wl->state == WL1271_STATE_OFF)) 3912 if (unlikely(wl->state == WL1271_STATE_OFF))
3727 goto out; 3913 goto out;
3728 3914
3915 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
3916 goto out;
3917
3729 ret = wl1271_ps_elp_wakeup(wl); 3918 ret = wl1271_ps_elp_wakeup(wl);
3730 if (ret < 0) 3919 if (ret < 0)
3731 goto out; 3920 goto out;
@@ -3746,6 +3935,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
3746 const struct ieee80211_tx_queue_params *params) 3935 const struct ieee80211_tx_queue_params *params)
3747{ 3936{
3748 struct wl1271 *wl = hw->priv; 3937 struct wl1271 *wl = hw->priv;
3938 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3749 u8 ps_scheme; 3939 u8 ps_scheme;
3750 int ret = 0; 3940 int ret = 0;
3751 3941
@@ -3792,13 +3982,13 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
3792 * the txop is confed in units of 32us by the mac80211, 3982 * the txop is confed in units of 32us by the mac80211,
3793 * we need us 3983 * we need us
3794 */ 3984 */
3795 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue), 3985 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
3796 params->cw_min, params->cw_max, 3986 params->cw_min, params->cw_max,
3797 params->aifs, params->txop << 5); 3987 params->aifs, params->txop << 5);
3798 if (ret < 0) 3988 if (ret < 0)
3799 goto out_sleep; 3989 goto out_sleep;
3800 3990
3801 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue), 3991 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
3802 CONF_CHANNEL_TYPE_EDCF, 3992 CONF_CHANNEL_TYPE_EDCF,
3803 wl1271_tx_get_queue(queue), 3993 wl1271_tx_get_queue(queue),
3804 ps_scheme, CONF_ACK_POLICY_LEGACY, 3994 ps_scheme, CONF_ACK_POLICY_LEGACY,
@@ -3861,43 +4051,43 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
3861} 4051}
3862 4052
3863static int wl1271_allocate_sta(struct wl1271 *wl, 4053static int wl1271_allocate_sta(struct wl1271 *wl,
3864 struct ieee80211_sta *sta, 4054 struct wl12xx_vif *wlvif,
3865 u8 *hlid) 4055 struct ieee80211_sta *sta)
3866{ 4056{
3867 struct wl1271_station *wl_sta; 4057 struct wl1271_station *wl_sta;
3868 int id; 4058 int ret;
4059
3869 4060
3870 id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS); 4061 if (wl->active_sta_count >= AP_MAX_STATIONS) {
3871 if (id >= AP_MAX_STATIONS) {
3872 wl1271_warning("could not allocate HLID - too much stations"); 4062 wl1271_warning("could not allocate HLID - too much stations");
3873 return -EBUSY; 4063 return -EBUSY;
3874 } 4064 }
3875 4065
3876 wl_sta = (struct wl1271_station *)sta->drv_priv; 4066 wl_sta = (struct wl1271_station *)sta->drv_priv;
3877 set_bit(id, wl->ap_hlid_map); 4067 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
3878 wl_sta->hlid = WL1271_AP_STA_HLID_START + id; 4068 if (ret < 0) {
3879 *hlid = wl_sta->hlid; 4069 wl1271_warning("could not allocate HLID - too many links");
4070 return -EBUSY;
4071 }
4072
4073 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
3880 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN); 4074 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
3881 wl->active_sta_count++; 4075 wl->active_sta_count++;
3882 return 0; 4076 return 0;
3883} 4077}
3884 4078
3885void wl1271_free_sta(struct wl1271 *wl, u8 hlid) 4079void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
3886{ 4080{
3887 int id = hlid - WL1271_AP_STA_HLID_START; 4081 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
3888
3889 if (hlid < WL1271_AP_STA_HLID_START)
3890 return; 4082 return;
3891 4083
3892 if (!test_bit(id, wl->ap_hlid_map)) 4084 clear_bit(hlid, wlvif->ap.sta_hlid_map);
3893 return;
3894
3895 clear_bit(id, wl->ap_hlid_map);
3896 memset(wl->links[hlid].addr, 0, ETH_ALEN); 4085 memset(wl->links[hlid].addr, 0, ETH_ALEN);
3897 wl->links[hlid].ba_bitmap = 0; 4086 wl->links[hlid].ba_bitmap = 0;
3898 wl1271_tx_reset_link_queues(wl, hlid); 4087 wl1271_tx_reset_link_queues(wl, hlid);
3899 __clear_bit(hlid, &wl->ap_ps_map); 4088 __clear_bit(hlid, &wl->ap_ps_map);
3900 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); 4089 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4090 wl12xx_free_link(wl, wlvif, &hlid);
3901 wl->active_sta_count--; 4091 wl->active_sta_count--;
3902} 4092}
3903 4093
@@ -3906,6 +4096,8 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
3906 struct ieee80211_sta *sta) 4096 struct ieee80211_sta *sta)
3907{ 4097{
3908 struct wl1271 *wl = hw->priv; 4098 struct wl1271 *wl = hw->priv;
4099 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4100 struct wl1271_station *wl_sta;
3909 int ret = 0; 4101 int ret = 0;
3910 u8 hlid; 4102 u8 hlid;
3911 4103
@@ -3914,20 +4106,23 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
3914 if (unlikely(wl->state == WL1271_STATE_OFF)) 4106 if (unlikely(wl->state == WL1271_STATE_OFF))
3915 goto out; 4107 goto out;
3916 4108
3917 if (wl->bss_type != BSS_TYPE_AP_BSS) 4109 if (wlvif->bss_type != BSS_TYPE_AP_BSS)
3918 goto out; 4110 goto out;
3919 4111
3920 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid); 4112 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
3921 4113
3922 ret = wl1271_allocate_sta(wl, sta, &hlid); 4114 ret = wl1271_allocate_sta(wl, wlvif, sta);
3923 if (ret < 0) 4115 if (ret < 0)
3924 goto out; 4116 goto out;
3925 4117
4118 wl_sta = (struct wl1271_station *)sta->drv_priv;
4119 hlid = wl_sta->hlid;
4120
3926 ret = wl1271_ps_elp_wakeup(wl); 4121 ret = wl1271_ps_elp_wakeup(wl);
3927 if (ret < 0) 4122 if (ret < 0)
3928 goto out_free_sta; 4123 goto out_free_sta;
3929 4124
3930 ret = wl12xx_cmd_add_peer(wl, sta, hlid); 4125 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
3931 if (ret < 0) 4126 if (ret < 0)
3932 goto out_sleep; 4127 goto out_sleep;
3933 4128
@@ -3944,7 +4139,7 @@ out_sleep:
3944 4139
3945out_free_sta: 4140out_free_sta:
3946 if (ret < 0) 4141 if (ret < 0)
3947 wl1271_free_sta(wl, hlid); 4142 wl1271_free_sta(wl, wlvif, hlid);
3948 4143
3949out: 4144out:
3950 mutex_unlock(&wl->mutex); 4145 mutex_unlock(&wl->mutex);
@@ -3956,6 +4151,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
3956 struct ieee80211_sta *sta) 4151 struct ieee80211_sta *sta)
3957{ 4152{
3958 struct wl1271 *wl = hw->priv; 4153 struct wl1271 *wl = hw->priv;
4154 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3959 struct wl1271_station *wl_sta; 4155 struct wl1271_station *wl_sta;
3960 int ret = 0, id; 4156 int ret = 0, id;
3961 4157
@@ -3964,14 +4160,14 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
3964 if (unlikely(wl->state == WL1271_STATE_OFF)) 4160 if (unlikely(wl->state == WL1271_STATE_OFF))
3965 goto out; 4161 goto out;
3966 4162
3967 if (wl->bss_type != BSS_TYPE_AP_BSS) 4163 if (wlvif->bss_type != BSS_TYPE_AP_BSS)
3968 goto out; 4164 goto out;
3969 4165
3970 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid); 4166 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
3971 4167
3972 wl_sta = (struct wl1271_station *)sta->drv_priv; 4168 wl_sta = (struct wl1271_station *)sta->drv_priv;
3973 id = wl_sta->hlid - WL1271_AP_STA_HLID_START; 4169 id = wl_sta->hlid;
3974 if (WARN_ON(!test_bit(id, wl->ap_hlid_map))) 4170 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
3975 goto out; 4171 goto out;
3976 4172
3977 ret = wl1271_ps_elp_wakeup(wl); 4173 ret = wl1271_ps_elp_wakeup(wl);
@@ -3982,7 +4178,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
3982 if (ret < 0) 4178 if (ret < 0)
3983 goto out_sleep; 4179 goto out_sleep;
3984 4180
3985 wl1271_free_sta(wl, wl_sta->hlid); 4181 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
3986 4182
3987out_sleep: 4183out_sleep:
3988 wl1271_ps_elp_sleep(wl); 4184 wl1271_ps_elp_sleep(wl);
@@ -3999,6 +4195,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
3999 u8 buf_size) 4195 u8 buf_size)
4000{ 4196{
4001 struct wl1271 *wl = hw->priv; 4197 struct wl1271 *wl = hw->priv;
4198 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4002 int ret; 4199 int ret;
4003 u8 hlid, *ba_bitmap; 4200 u8 hlid, *ba_bitmap;
4004 4201
@@ -4016,10 +4213,10 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4016 goto out; 4213 goto out;
4017 } 4214 }
4018 4215
4019 if (wl->bss_type == BSS_TYPE_STA_BSS) { 4216 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4020 hlid = wl->sta_hlid; 4217 hlid = wlvif->sta.hlid;
4021 ba_bitmap = &wl->ba_rx_bitmap; 4218 ba_bitmap = &wlvif->sta.ba_rx_bitmap;
4022 } else if (wl->bss_type == BSS_TYPE_AP_BSS) { 4219 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4023 struct wl1271_station *wl_sta; 4220 struct wl1271_station *wl_sta;
4024 4221
4025 wl_sta = (struct wl1271_station *)sta->drv_priv; 4222 wl_sta = (struct wl1271_station *)sta->drv_priv;
@@ -4039,7 +4236,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4039 4236
4040 switch (action) { 4237 switch (action) {
4041 case IEEE80211_AMPDU_RX_START: 4238 case IEEE80211_AMPDU_RX_START:
4042 if (!wl->ba_support || !wl->ba_allowed) { 4239 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4043 ret = -ENOTSUPP; 4240 ret = -ENOTSUPP;
4044 break; 4241 break;
4045 } 4242 }
@@ -4108,8 +4305,9 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4108 struct ieee80211_vif *vif, 4305 struct ieee80211_vif *vif,
4109 const struct cfg80211_bitrate_mask *mask) 4306 const struct cfg80211_bitrate_mask *mask)
4110{ 4307{
4308 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4111 struct wl1271 *wl = hw->priv; 4309 struct wl1271 *wl = hw->priv;
4112 int i; 4310 int i, ret = 0;
4113 4311
4114 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x", 4312 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4115 mask->control[NL80211_BAND_2GHZ].legacy, 4313 mask->control[NL80211_BAND_2GHZ].legacy,
@@ -4118,19 +4316,39 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4118 mutex_lock(&wl->mutex); 4316 mutex_lock(&wl->mutex);
4119 4317
4120 for (i = 0; i < IEEE80211_NUM_BANDS; i++) 4318 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
4121 wl->bitrate_masks[i] = 4319 wlvif->bitrate_masks[i] =
4122 wl1271_tx_enabled_rates_get(wl, 4320 wl1271_tx_enabled_rates_get(wl,
4123 mask->control[i].legacy, 4321 mask->control[i].legacy,
4124 i); 4322 i);
4323
4324 if (unlikely(wl->state == WL1271_STATE_OFF))
4325 goto out;
4326
4327 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4328 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4329
4330 ret = wl1271_ps_elp_wakeup(wl);
4331 if (ret < 0)
4332 goto out;
4333
4334 wl1271_set_band_rate(wl, wlvif);
4335 wlvif->basic_rate =
4336 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4337 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4338
4339 wl1271_ps_elp_sleep(wl);
4340 }
4341out:
4125 mutex_unlock(&wl->mutex); 4342 mutex_unlock(&wl->mutex);
4126 4343
4127 return 0; 4344 return ret;
4128} 4345}
4129 4346
4130static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, 4347static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4131 struct ieee80211_channel_switch *ch_switch) 4348 struct ieee80211_channel_switch *ch_switch)
4132{ 4349{
4133 struct wl1271 *wl = hw->priv; 4350 struct wl1271 *wl = hw->priv;
4351 struct wl12xx_vif *wlvif;
4134 int ret; 4352 int ret;
4135 4353
4136 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch"); 4354 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
@@ -4138,19 +4356,24 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4138 mutex_lock(&wl->mutex); 4356 mutex_lock(&wl->mutex);
4139 4357
4140 if (unlikely(wl->state == WL1271_STATE_OFF)) { 4358 if (unlikely(wl->state == WL1271_STATE_OFF)) {
4141 mutex_unlock(&wl->mutex); 4359 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4142 ieee80211_chswitch_done(wl->vif, false); 4360 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4143 return; 4361 ieee80211_chswitch_done(vif, false);
4362 }
4363 goto out;
4144 } 4364 }
4145 4365
4146 ret = wl1271_ps_elp_wakeup(wl); 4366 ret = wl1271_ps_elp_wakeup(wl);
4147 if (ret < 0) 4367 if (ret < 0)
4148 goto out; 4368 goto out;
4149 4369
4150 ret = wl12xx_cmd_channel_switch(wl, ch_switch); 4370 /* TODO: change mac80211 to pass vif as param */
4371 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4372 ret = wl12xx_cmd_channel_switch(wl, ch_switch);
4151 4373
4152 if (!ret) 4374 if (!ret)
4153 set_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags); 4375 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4376 }
4154 4377
4155 wl1271_ps_elp_sleep(wl); 4378 wl1271_ps_elp_sleep(wl);
4156 4379
@@ -4170,10 +4393,6 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
4170 4393
4171 /* packets are considered pending if in the TX queue or the FW */ 4394 /* packets are considered pending if in the TX queue or the FW */
4172 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0); 4395 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
4173
4174 /* the above is appropriate for STA mode for PS purposes */
4175 WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
4176
4177out: 4396out:
4178 mutex_unlock(&wl->mutex); 4397 mutex_unlock(&wl->mutex);
4179 4398
@@ -4604,7 +4823,7 @@ static struct bin_attribute fwlog_attr = {
4604 .read = wl1271_sysfs_read_fwlog, 4823 .read = wl1271_sysfs_read_fwlog,
4605}; 4824};
4606 4825
4607int wl1271_register_hw(struct wl1271 *wl) 4826static int wl1271_register_hw(struct wl1271 *wl)
4608{ 4827{
4609 int ret; 4828 int ret;
4610 4829
@@ -4645,9 +4864,8 @@ int wl1271_register_hw(struct wl1271 *wl)
4645 4864
4646 return 0; 4865 return 0;
4647} 4866}
4648EXPORT_SYMBOL_GPL(wl1271_register_hw);
4649 4867
4650void wl1271_unregister_hw(struct wl1271 *wl) 4868static void wl1271_unregister_hw(struct wl1271 *wl)
4651{ 4869{
4652 if (wl->state == WL1271_STATE_PLT) 4870 if (wl->state == WL1271_STATE_PLT)
4653 __wl1271_plt_stop(wl); 4871 __wl1271_plt_stop(wl);
@@ -4657,9 +4875,8 @@ void wl1271_unregister_hw(struct wl1271 *wl)
4657 wl->mac80211_registered = false; 4875 wl->mac80211_registered = false;
4658 4876
4659} 4877}
4660EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
4661 4878
4662int wl1271_init_ieee80211(struct wl1271 *wl) 4879static int wl1271_init_ieee80211(struct wl1271 *wl)
4663{ 4880{
4664 static const u32 cipher_suites[] = { 4881 static const u32 cipher_suites[] = {
4665 WLAN_CIPHER_SUITE_WEP40, 4882 WLAN_CIPHER_SUITE_WEP40,
@@ -4736,27 +4953,33 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
4736 4953
4737 wl->hw->wiphy->reg_notifier = wl1271_reg_notify; 4954 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
4738 4955
4739 SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl)); 4956 /* the FW answers probe-requests in AP-mode */
4957 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
4958 wl->hw->wiphy->probe_resp_offload =
4959 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
4960 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
4961 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
4962
4963 SET_IEEE80211_DEV(wl->hw, wl->dev);
4740 4964
4741 wl->hw->sta_data_size = sizeof(struct wl1271_station); 4965 wl->hw->sta_data_size = sizeof(struct wl1271_station);
4966 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
4742 4967
4743 wl->hw->max_rx_aggregation_subframes = 8; 4968 wl->hw->max_rx_aggregation_subframes = 8;
4744 4969
4745 return 0; 4970 return 0;
4746} 4971}
4747EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
4748 4972
4749#define WL1271_DEFAULT_CHANNEL 0 4973#define WL1271_DEFAULT_CHANNEL 0
4750 4974
4751struct ieee80211_hw *wl1271_alloc_hw(void) 4975static struct ieee80211_hw *wl1271_alloc_hw(void)
4752{ 4976{
4753 struct ieee80211_hw *hw; 4977 struct ieee80211_hw *hw;
4754 struct platform_device *plat_dev = NULL;
4755 struct wl1271 *wl; 4978 struct wl1271 *wl;
4756 int i, j, ret; 4979 int i, j, ret;
4757 unsigned int order; 4980 unsigned int order;
4758 4981
4759 BUILD_BUG_ON(AP_MAX_LINKS > WL12XX_MAX_LINKS); 4982 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
4760 4983
4761 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); 4984 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
4762 if (!hw) { 4985 if (!hw) {
@@ -4765,41 +4988,26 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
4765 goto err_hw_alloc; 4988 goto err_hw_alloc;
4766 } 4989 }
4767 4990
4768 plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL);
4769 if (!plat_dev) {
4770 wl1271_error("could not allocate platform_device");
4771 ret = -ENOMEM;
4772 goto err_plat_alloc;
4773 }
4774
4775 wl = hw->priv; 4991 wl = hw->priv;
4776 memset(wl, 0, sizeof(*wl)); 4992 memset(wl, 0, sizeof(*wl));
4777 4993
4778 INIT_LIST_HEAD(&wl->list); 4994 INIT_LIST_HEAD(&wl->list);
4995 INIT_LIST_HEAD(&wl->wlvif_list);
4779 4996
4780 wl->hw = hw; 4997 wl->hw = hw;
4781 wl->plat_dev = plat_dev;
4782
4783 for (i = 0; i < NUM_TX_QUEUES; i++)
4784 skb_queue_head_init(&wl->tx_queue[i]);
4785 4998
4786 for (i = 0; i < NUM_TX_QUEUES; i++) 4999 for (i = 0; i < NUM_TX_QUEUES; i++)
4787 for (j = 0; j < AP_MAX_LINKS; j++) 5000 for (j = 0; j < WL12XX_MAX_LINKS; j++)
4788 skb_queue_head_init(&wl->links[j].tx_queue[i]); 5001 skb_queue_head_init(&wl->links[j].tx_queue[i]);
4789 5002
4790 skb_queue_head_init(&wl->deferred_rx_queue); 5003 skb_queue_head_init(&wl->deferred_rx_queue);
4791 skb_queue_head_init(&wl->deferred_tx_queue); 5004 skb_queue_head_init(&wl->deferred_tx_queue);
4792 5005
4793 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 5006 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
4794 INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
4795 INIT_WORK(&wl->netstack_work, wl1271_netstack_work); 5007 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
4796 INIT_WORK(&wl->tx_work, wl1271_tx_work); 5008 INIT_WORK(&wl->tx_work, wl1271_tx_work);
4797 INIT_WORK(&wl->recovery_work, wl1271_recovery_work); 5009 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
4798 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); 5010 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
4799 INIT_WORK(&wl->rx_streaming_enable_work,
4800 wl1271_rx_streaming_enable_work);
4801 INIT_WORK(&wl->rx_streaming_disable_work,
4802 wl1271_rx_streaming_disable_work);
4803 5011
4804 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq"); 5012 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
4805 if (!wl->freezable_wq) { 5013 if (!wl->freezable_wq) {
@@ -4808,41 +5016,21 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
4808 } 5016 }
4809 5017
4810 wl->channel = WL1271_DEFAULT_CHANNEL; 5018 wl->channel = WL1271_DEFAULT_CHANNEL;
4811 wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
4812 wl->default_key = 0;
4813 wl->rx_counter = 0; 5019 wl->rx_counter = 0;
4814 wl->psm_entry_retry = 0;
4815 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 5020 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
4816 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
4817 wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
4818 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
4819 wl->band = IEEE80211_BAND_2GHZ; 5021 wl->band = IEEE80211_BAND_2GHZ;
4820 wl->vif = NULL; 5022 wl->vif = NULL;
4821 wl->flags = 0; 5023 wl->flags = 0;
4822 wl->sg_enabled = true; 5024 wl->sg_enabled = true;
4823 wl->hw_pg_ver = -1; 5025 wl->hw_pg_ver = -1;
4824 wl->bss_type = MAX_BSS_TYPE;
4825 wl->set_bss_type = MAX_BSS_TYPE;
4826 wl->last_tx_hlid = 0;
4827 wl->ap_ps_map = 0; 5026 wl->ap_ps_map = 0;
4828 wl->ap_fw_ps_map = 0; 5027 wl->ap_fw_ps_map = 0;
4829 wl->quirks = 0; 5028 wl->quirks = 0;
4830 wl->platform_quirks = 0; 5029 wl->platform_quirks = 0;
4831 wl->sched_scanning = false; 5030 wl->sched_scanning = false;
4832 wl->tx_security_seq = 0;
4833 wl->tx_security_last_seq_lsb = 0;
4834 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; 5031 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
4835 wl->role_id = WL12XX_INVALID_ROLE_ID;
4836 wl->system_hlid = WL12XX_SYSTEM_HLID; 5032 wl->system_hlid = WL12XX_SYSTEM_HLID;
4837 wl->sta_hlid = WL12XX_INVALID_LINK_ID;
4838 wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
4839 wl->dev_hlid = WL12XX_INVALID_LINK_ID;
4840 wl->session_counter = 0;
4841 wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
4842 wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
4843 wl->active_sta_count = 0; 5033 wl->active_sta_count = 0;
4844 setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
4845 (unsigned long) wl);
4846 wl->fwlog_size = 0; 5034 wl->fwlog_size = 0;
4847 init_waitqueue_head(&wl->fwlog_waitq); 5035 init_waitqueue_head(&wl->fwlog_waitq);
4848 5036
@@ -4860,8 +5048,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
4860 5048
4861 /* Apply default driver configuration. */ 5049 /* Apply default driver configuration. */
4862 wl1271_conf_init(wl); 5050 wl1271_conf_init(wl);
4863 wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
4864 wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
4865 5051
4866 order = get_order(WL1271_AGGR_BUFFER_SIZE); 5052 order = get_order(WL1271_AGGR_BUFFER_SIZE);
4867 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); 5053 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -4883,49 +5069,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
4883 goto err_dummy_packet; 5069 goto err_dummy_packet;
4884 } 5070 }
4885 5071
4886 /* Register platform device */
4887 ret = platform_device_register(wl->plat_dev);
4888 if (ret) {
4889 wl1271_error("couldn't register platform device");
4890 goto err_fwlog;
4891 }
4892 dev_set_drvdata(&wl->plat_dev->dev, wl);
4893
4894 /* Create sysfs file to control bt coex state */
4895 ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
4896 if (ret < 0) {
4897 wl1271_error("failed to create sysfs file bt_coex_state");
4898 goto err_platform;
4899 }
4900
4901 /* Create sysfs file to get HW PG version */
4902 ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
4903 if (ret < 0) {
4904 wl1271_error("failed to create sysfs file hw_pg_ver");
4905 goto err_bt_coex_state;
4906 }
4907
4908 /* Create sysfs file for the FW log */
4909 ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr);
4910 if (ret < 0) {
4911 wl1271_error("failed to create sysfs file fwlog");
4912 goto err_hw_pg_ver;
4913 }
4914
4915 return hw; 5072 return hw;
4916 5073
4917err_hw_pg_ver:
4918 device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
4919
4920err_bt_coex_state:
4921 device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
4922
4923err_platform:
4924 platform_device_unregister(wl->plat_dev);
4925
4926err_fwlog:
4927 free_page((unsigned long)wl->fwlog);
4928
4929err_dummy_packet: 5074err_dummy_packet:
4930 dev_kfree_skb(wl->dummy_packet); 5075 dev_kfree_skb(wl->dummy_packet);
4931 5076
@@ -4937,18 +5082,14 @@ err_wq:
4937 5082
4938err_hw: 5083err_hw:
4939 wl1271_debugfs_exit(wl); 5084 wl1271_debugfs_exit(wl);
4940 kfree(plat_dev);
4941
4942err_plat_alloc:
4943 ieee80211_free_hw(hw); 5085 ieee80211_free_hw(hw);
4944 5086
4945err_hw_alloc: 5087err_hw_alloc:
4946 5088
4947 return ERR_PTR(ret); 5089 return ERR_PTR(ret);
4948} 5090}
4949EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
4950 5091
4951int wl1271_free_hw(struct wl1271 *wl) 5092static int wl1271_free_hw(struct wl1271 *wl)
4952{ 5093{
4953 /* Unblock any fwlog readers */ 5094 /* Unblock any fwlog readers */
4954 mutex_lock(&wl->mutex); 5095 mutex_lock(&wl->mutex);
@@ -4956,17 +5097,15 @@ int wl1271_free_hw(struct wl1271 *wl)
4956 wake_up_interruptible_all(&wl->fwlog_waitq); 5097 wake_up_interruptible_all(&wl->fwlog_waitq);
4957 mutex_unlock(&wl->mutex); 5098 mutex_unlock(&wl->mutex);
4958 5099
4959 device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr); 5100 device_remove_bin_file(wl->dev, &fwlog_attr);
4960 5101
4961 device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver); 5102 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
4962 5103
4963 device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state); 5104 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
4964 platform_device_unregister(wl->plat_dev);
4965 free_page((unsigned long)wl->fwlog); 5105 free_page((unsigned long)wl->fwlog);
4966 dev_kfree_skb(wl->dummy_packet); 5106 dev_kfree_skb(wl->dummy_packet);
4967 free_pages((unsigned long)wl->aggr_buf, 5107 free_pages((unsigned long)wl->aggr_buf,
4968 get_order(WL1271_AGGR_BUFFER_SIZE)); 5108 get_order(WL1271_AGGR_BUFFER_SIZE));
4969 kfree(wl->plat_dev);
4970 5109
4971 wl1271_debugfs_exit(wl); 5110 wl1271_debugfs_exit(wl);
4972 5111
@@ -4983,7 +5122,174 @@ int wl1271_free_hw(struct wl1271 *wl)
4983 5122
4984 return 0; 5123 return 0;
4985} 5124}
4986EXPORT_SYMBOL_GPL(wl1271_free_hw); 5125
5126static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5127{
5128 struct wl1271 *wl = cookie;
5129 unsigned long flags;
5130
5131 wl1271_debug(DEBUG_IRQ, "IRQ");
5132
5133 /* complete the ELP completion */
5134 spin_lock_irqsave(&wl->wl_lock, flags);
5135 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5136 if (wl->elp_compl) {
5137 complete(wl->elp_compl);
5138 wl->elp_compl = NULL;
5139 }
5140
5141 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5142 /* don't enqueue a work right now. mark it as pending */
5143 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5144 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5145 disable_irq_nosync(wl->irq);
5146 pm_wakeup_event(wl->dev, 0);
5147 spin_unlock_irqrestore(&wl->wl_lock, flags);
5148 return IRQ_HANDLED;
5149 }
5150 spin_unlock_irqrestore(&wl->wl_lock, flags);
5151
5152 return IRQ_WAKE_THREAD;
5153}
5154
5155static int __devinit wl12xx_probe(struct platform_device *pdev)
5156{
5157 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5158 struct ieee80211_hw *hw;
5159 struct wl1271 *wl;
5160 unsigned long irqflags;
5161 int ret = -ENODEV;
5162
5163 hw = wl1271_alloc_hw();
5164 if (IS_ERR(hw)) {
5165 wl1271_error("can't allocate hw");
5166 ret = PTR_ERR(hw);
5167 goto out;
5168 }
5169
5170 wl = hw->priv;
5171 wl->irq = platform_get_irq(pdev, 0);
5172 wl->ref_clock = pdata->board_ref_clock;
5173 wl->tcxo_clock = pdata->board_tcxo_clock;
5174 wl->platform_quirks = pdata->platform_quirks;
5175 wl->set_power = pdata->set_power;
5176 wl->dev = &pdev->dev;
5177 wl->if_ops = pdata->ops;
5178
5179 platform_set_drvdata(pdev, wl);
5180
5181 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
5182 irqflags = IRQF_TRIGGER_RISING;
5183 else
5184 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
5185
5186 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
5187 irqflags,
5188 pdev->name, wl);
5189 if (ret < 0) {
5190 wl1271_error("request_irq() failed: %d", ret);
5191 goto out_free_hw;
5192 }
5193
5194 ret = enable_irq_wake(wl->irq);
5195 if (!ret) {
5196 wl->irq_wake_enabled = true;
5197 device_init_wakeup(wl->dev, 1);
5198 if (pdata->pwr_in_suspend)
5199 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5200
5201 }
5202 disable_irq(wl->irq);
5203
5204 ret = wl1271_init_ieee80211(wl);
5205 if (ret)
5206 goto out_irq;
5207
5208 ret = wl1271_register_hw(wl);
5209 if (ret)
5210 goto out_irq;
5211
5212 /* Create sysfs file to control bt coex state */
5213 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
5214 if (ret < 0) {
5215 wl1271_error("failed to create sysfs file bt_coex_state");
5216 goto out_irq;
5217 }
5218
5219 /* Create sysfs file to get HW PG version */
5220 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
5221 if (ret < 0) {
5222 wl1271_error("failed to create sysfs file hw_pg_ver");
5223 goto out_bt_coex_state;
5224 }
5225
5226 /* Create sysfs file for the FW log */
5227 ret = device_create_bin_file(wl->dev, &fwlog_attr);
5228 if (ret < 0) {
5229 wl1271_error("failed to create sysfs file fwlog");
5230 goto out_hw_pg_ver;
5231 }
5232
5233 return 0;
5234
5235out_hw_pg_ver:
5236 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5237
5238out_bt_coex_state:
5239 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5240
5241out_irq:
5242 free_irq(wl->irq, wl);
5243
5244out_free_hw:
5245 wl1271_free_hw(wl);
5246
5247out:
5248 return ret;
5249}
5250
5251static int __devexit wl12xx_remove(struct platform_device *pdev)
5252{
5253 struct wl1271 *wl = platform_get_drvdata(pdev);
5254
5255 if (wl->irq_wake_enabled) {
5256 device_init_wakeup(wl->dev, 0);
5257 disable_irq_wake(wl->irq);
5258 }
5259 wl1271_unregister_hw(wl);
5260 free_irq(wl->irq, wl);
5261 wl1271_free_hw(wl);
5262
5263 return 0;
5264}
5265
5266static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
5267 { "wl12xx", 0 },
5268 { } /* Terminating Entry */
5269};
5270MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
5271
5272static struct platform_driver wl12xx_driver = {
5273 .probe = wl12xx_probe,
5274 .remove = __devexit_p(wl12xx_remove),
5275 .id_table = wl12xx_id_table,
5276 .driver = {
5277 .name = "wl12xx_driver",
5278 .owner = THIS_MODULE,
5279 }
5280};
5281
5282static int __init wl12xx_init(void)
5283{
5284 return platform_driver_register(&wl12xx_driver);
5285}
5286module_init(wl12xx_init);
5287
5288static void __exit wl12xx_exit(void)
5289{
5290 platform_driver_unregister(&wl12xx_driver);
5291}
5292module_exit(wl12xx_exit);
4987 5293
4988u32 wl12xx_debug_level = DEBUG_NONE; 5294u32 wl12xx_debug_level = DEBUG_NONE;
4989EXPORT_SYMBOL_GPL(wl12xx_debug_level); 5295EXPORT_SYMBOL_GPL(wl12xx_debug_level);
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index c15ebf2efd40..a7a11088dd31 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -25,6 +25,7 @@
25#include "ps.h" 25#include "ps.h"
26#include "io.h" 26#include "io.h"
27#include "tx.h" 27#include "tx.h"
28#include "debug.h"
28 29
29#define WL1271_WAKEUP_TIMEOUT 500 30#define WL1271_WAKEUP_TIMEOUT 500
30 31
@@ -32,6 +33,7 @@ void wl1271_elp_work(struct work_struct *work)
32{ 33{
33 struct delayed_work *dwork; 34 struct delayed_work *dwork;
34 struct wl1271 *wl; 35 struct wl1271 *wl;
36 struct wl12xx_vif *wlvif;
35 37
36 dwork = container_of(work, struct delayed_work, work); 38 dwork = container_of(work, struct delayed_work, work);
37 wl = container_of(dwork, struct wl1271, elp_work); 39 wl = container_of(dwork, struct wl1271, elp_work);
@@ -47,11 +49,15 @@ void wl1271_elp_work(struct work_struct *work)
47 if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) 49 if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
48 goto out; 50 goto out;
49 51
50 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) || 52 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
51 (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
52 !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
53 goto out; 53 goto out;
54 54
55 wl12xx_for_each_wlvif(wl, wlvif) {
56 if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
57 !test_bit(WL1271_FLAG_IDLE, &wl->flags))
58 goto out;
59 }
60
55 wl1271_debug(DEBUG_PSM, "chip to elp"); 61 wl1271_debug(DEBUG_PSM, "chip to elp");
56 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); 62 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
57 set_bit(WL1271_FLAG_IN_ELP, &wl->flags); 63 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
@@ -65,13 +71,17 @@ out:
65/* Routines to toggle sleep mode while in ELP */ 71/* Routines to toggle sleep mode while in ELP */
66void wl1271_ps_elp_sleep(struct wl1271 *wl) 72void wl1271_ps_elp_sleep(struct wl1271 *wl)
67{ 73{
74 struct wl12xx_vif *wlvif;
75
68 /* we shouldn't get consecutive sleep requests */ 76 /* we shouldn't get consecutive sleep requests */
69 if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) 77 if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
70 return; 78 return;
71 79
72 if (!test_bit(WL1271_FLAG_PSM, &wl->flags) && 80 wl12xx_for_each_wlvif(wl, wlvif) {
73 !test_bit(WL1271_FLAG_IDLE, &wl->flags)) 81 if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
74 return; 82 !test_bit(WL1271_FLAG_IDLE, &wl->flags))
83 return;
84 }
75 85
76 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 86 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
77 msecs_to_jiffies(ELP_ENTRY_DELAY)); 87 msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -143,8 +153,8 @@ out:
143 return 0; 153 return 0;
144} 154}
145 155
146int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, 156int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
147 u32 rates, bool send) 157 enum wl1271_cmd_ps_mode mode, u32 rates, bool send)
148{ 158{
149 int ret; 159 int ret;
150 160
@@ -152,39 +162,34 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
152 case STATION_POWER_SAVE_MODE: 162 case STATION_POWER_SAVE_MODE:
153 wl1271_debug(DEBUG_PSM, "entering psm"); 163 wl1271_debug(DEBUG_PSM, "entering psm");
154 164
155 ret = wl1271_acx_wake_up_conditions(wl); 165 ret = wl1271_acx_wake_up_conditions(wl, wlvif);
156 if (ret < 0) { 166 if (ret < 0) {
157 wl1271_error("couldn't set wake up conditions"); 167 wl1271_error("couldn't set wake up conditions");
158 return ret; 168 return ret;
159 } 169 }
160 170
161 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); 171 ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
162 if (ret < 0) 172 if (ret < 0)
163 return ret; 173 return ret;
164 174
165 set_bit(WL1271_FLAG_PSM, &wl->flags); 175 set_bit(WLVIF_FLAG_PSM, &wlvif->flags);
166 break; 176 break;
167 case STATION_ACTIVE_MODE: 177 case STATION_ACTIVE_MODE:
168 default: 178 default:
169 wl1271_debug(DEBUG_PSM, "leaving psm"); 179 wl1271_debug(DEBUG_PSM, "leaving psm");
170 180
171 /* disable beacon early termination */ 181 /* disable beacon early termination */
172 if (wl->band == IEEE80211_BAND_2GHZ) { 182 if (wlvif->band == IEEE80211_BAND_2GHZ) {
173 ret = wl1271_acx_bet_enable(wl, false); 183 ret = wl1271_acx_bet_enable(wl, wlvif, false);
174 if (ret < 0) 184 if (ret < 0)
175 return ret; 185 return ret;
176 } 186 }
177 187
178 /* disable beacon filtering */ 188 ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE);
179 ret = wl1271_acx_beacon_filter_opt(wl, false);
180 if (ret < 0)
181 return ret;
182
183 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
184 if (ret < 0) 189 if (ret < 0)
185 return ret; 190 return ret;
186 191
187 clear_bit(WL1271_FLAG_PSM, &wl->flags); 192 clear_bit(WLVIF_FLAG_PSM, &wlvif->flags);
188 break; 193 break;
189 } 194 }
190 195
@@ -223,9 +228,11 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
223 wl1271_handle_tx_low_watermark(wl); 228 wl1271_handle_tx_low_watermark(wl);
224} 229}
225 230
226void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues) 231void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
232 u8 hlid, bool clean_queues)
227{ 233{
228 struct ieee80211_sta *sta; 234 struct ieee80211_sta *sta;
235 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
229 236
230 if (test_bit(hlid, &wl->ap_ps_map)) 237 if (test_bit(hlid, &wl->ap_ps_map))
231 return; 238 return;
@@ -235,7 +242,7 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
235 clean_queues); 242 clean_queues);
236 243
237 rcu_read_lock(); 244 rcu_read_lock();
238 sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr); 245 sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
239 if (!sta) { 246 if (!sta) {
240 wl1271_error("could not find sta %pM for starting ps", 247 wl1271_error("could not find sta %pM for starting ps",
241 wl->links[hlid].addr); 248 wl->links[hlid].addr);
@@ -253,9 +260,10 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
253 __set_bit(hlid, &wl->ap_ps_map); 260 __set_bit(hlid, &wl->ap_ps_map);
254} 261}
255 262
256void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid) 263void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
257{ 264{
258 struct ieee80211_sta *sta; 265 struct ieee80211_sta *sta;
266 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
259 267
260 if (!test_bit(hlid, &wl->ap_ps_map)) 268 if (!test_bit(hlid, &wl->ap_ps_map))
261 return; 269 return;
@@ -265,7 +273,7 @@ void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
265 __clear_bit(hlid, &wl->ap_ps_map); 273 __clear_bit(hlid, &wl->ap_ps_map);
266 274
267 rcu_read_lock(); 275 rcu_read_lock();
268 sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr); 276 sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
269 if (!sta) { 277 if (!sta) {
270 wl1271_error("could not find sta %pM for ending ps", 278 wl1271_error("could not find sta %pM for ending ps",
271 wl->links[hlid].addr); 279 wl->links[hlid].addr);
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index 25eb9bc9b628..a12052f02026 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -27,13 +27,14 @@
27#include "wl12xx.h" 27#include "wl12xx.h"
28#include "acx.h" 28#include "acx.h"
29 29
30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, 30int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
31 u32 rates, bool send); 31 enum wl1271_cmd_ps_mode mode, u32 rates, bool send);
32void wl1271_ps_elp_sleep(struct wl1271 *wl); 32void wl1271_ps_elp_sleep(struct wl1271 *wl);
33int wl1271_ps_elp_wakeup(struct wl1271 *wl); 33int wl1271_ps_elp_wakeup(struct wl1271 *wl);
34void wl1271_elp_work(struct work_struct *work); 34void wl1271_elp_work(struct work_struct *work);
35void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues); 35void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
36void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid); 36 u8 hlid, bool clean_queues);
37void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
37 38
38#define WL1271_PS_COMPLETE_TIMEOUT 500 39#define WL1271_PS_COMPLETE_TIMEOUT 500
39 40
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
index 3f570f397586..df34d5977b98 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -408,7 +408,7 @@
408 408
409 409
410/* Firmware image load chunk size */ 410/* Firmware image load chunk size */
411#define CHUNK_SIZE 512 411#define CHUNK_SIZE 16384
412 412
413/* Firmware image header size */ 413/* Firmware image header size */
414#define FW_HDR_SIZE 8 414#define FW_HDR_SIZE 8
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index dee4cfe9ccc1..4fbd2a722ffa 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -25,9 +25,11 @@
25#include <linux/sched.h> 25#include <linux/sched.h>
26 26
27#include "wl12xx.h" 27#include "wl12xx.h"
28#include "debug.h"
28#include "acx.h" 29#include "acx.h"
29#include "reg.h" 30#include "reg.h"
30#include "rx.h" 31#include "rx.h"
32#include "tx.h"
31#include "io.h" 33#include "io.h"
32 34
33static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status, 35static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
@@ -96,7 +98,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
96} 98}
97 99
98static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, 100static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
99 bool unaligned) 101 bool unaligned, u8 *hlid)
100{ 102{
101 struct wl1271_rx_descriptor *desc; 103 struct wl1271_rx_descriptor *desc;
102 struct sk_buff *skb; 104 struct sk_buff *skb;
@@ -159,6 +161,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
159 * payload aligned to 4 bytes. 161 * payload aligned to 4 bytes.
160 */ 162 */
161 memcpy(buf, data + sizeof(*desc), length - sizeof(*desc)); 163 memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
164 *hlid = desc->hlid;
162 165
163 hdr = (struct ieee80211_hdr *)skb->data; 166 hdr = (struct ieee80211_hdr *)skb->data;
164 if (ieee80211_is_beacon(hdr->frame_control)) 167 if (ieee80211_is_beacon(hdr->frame_control))
@@ -169,10 +172,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
169 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); 172 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
170 173
171 seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; 174 seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
172 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d", skb, 175 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
173 skb->len - desc->pad_len, 176 skb->len - desc->pad_len,
174 beacon ? "beacon" : "", 177 beacon ? "beacon" : "",
175 seq_num); 178 seq_num, *hlid);
176 179
177 skb_trim(skb, skb->len - desc->pad_len); 180 skb_trim(skb, skb->len - desc->pad_len);
178 181
@@ -185,6 +188,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
185void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) 188void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
186{ 189{
187 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; 190 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
191 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
188 u32 buf_size; 192 u32 buf_size;
189 u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 193 u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
190 u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 194 u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
@@ -192,8 +196,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
192 u32 mem_block; 196 u32 mem_block;
193 u32 pkt_length; 197 u32 pkt_length;
194 u32 pkt_offset; 198 u32 pkt_offset;
195 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); 199 u8 hlid;
196 bool had_data = false;
197 bool unaligned = false; 200 bool unaligned = false;
198 201
199 while (drv_rx_counter != fw_rx_counter) { 202 while (drv_rx_counter != fw_rx_counter) {
@@ -253,8 +256,15 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
253 */ 256 */
254 if (wl1271_rx_handle_data(wl, 257 if (wl1271_rx_handle_data(wl,
255 wl->aggr_buf + pkt_offset, 258 wl->aggr_buf + pkt_offset,
256 pkt_length, unaligned) == 1) 259 pkt_length, unaligned,
257 had_data = true; 260 &hlid) == 1) {
261 if (hlid < WL12XX_MAX_LINKS)
262 __set_bit(hlid, active_hlids);
263 else
264 WARN(1,
265 "hlid exceeded WL12XX_MAX_LINKS "
266 "(%d)\n", hlid);
267 }
258 268
259 wl->rx_counter++; 269 wl->rx_counter++;
260 drv_rx_counter++; 270 drv_rx_counter++;
@@ -270,17 +280,5 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
270 if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) 280 if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
271 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); 281 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
272 282
273 if (!is_ap && wl->conf.rx_streaming.interval && had_data && 283 wl12xx_rearm_rx_streaming(wl, active_hlids);
274 (wl->conf.rx_streaming.always ||
275 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
276 u32 timeout = wl->conf.rx_streaming.duration;
277
278 /* restart rx streaming */
279 if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
280 ieee80211_queue_work(wl->hw,
281 &wl->rx_streaming_enable_work);
282
283 mod_timer(&wl->rx_streaming_timer,
284 jiffies + msecs_to_jiffies(timeout));
285 }
286} 284}
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index fc29c671cf3b..8599dab1fe2a 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -24,6 +24,7 @@
24#include <linux/ieee80211.h> 24#include <linux/ieee80211.h>
25 25
26#include "wl12xx.h" 26#include "wl12xx.h"
27#include "debug.h"
27#include "cmd.h" 28#include "cmd.h"
28#include "scan.h" 29#include "scan.h"
29#include "acx.h" 30#include "acx.h"
@@ -34,6 +35,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
34{ 35{
35 struct delayed_work *dwork; 36 struct delayed_work *dwork;
36 struct wl1271 *wl; 37 struct wl1271 *wl;
38 struct ieee80211_vif *vif;
39 struct wl12xx_vif *wlvif;
37 int ret; 40 int ret;
38 bool is_sta, is_ibss; 41 bool is_sta, is_ibss;
39 42
@@ -50,28 +53,31 @@ void wl1271_scan_complete_work(struct work_struct *work)
50 if (wl->scan.state == WL1271_SCAN_STATE_IDLE) 53 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
51 goto out; 54 goto out;
52 55
56 vif = wl->scan_vif;
57 wlvif = wl12xx_vif_to_data(vif);
58
53 wl->scan.state = WL1271_SCAN_STATE_IDLE; 59 wl->scan.state = WL1271_SCAN_STATE_IDLE;
54 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); 60 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
55 wl->scan.req = NULL; 61 wl->scan.req = NULL;
62 wl->scan_vif = NULL;
56 63
57 ret = wl1271_ps_elp_wakeup(wl); 64 ret = wl1271_ps_elp_wakeup(wl);
58 if (ret < 0) 65 if (ret < 0)
59 goto out; 66 goto out;
60 67
61 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { 68 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
62 /* restore hardware connection monitoring template */ 69 /* restore hardware connection monitoring template */
63 wl1271_cmd_build_ap_probe_req(wl, wl->probereq); 70 wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
64 } 71 }
65 72
66 /* return to ROC if needed */ 73 /* return to ROC if needed */
67 is_sta = (wl->bss_type == BSS_TYPE_STA_BSS); 74 is_sta = (wlvif->bss_type == BSS_TYPE_STA_BSS);
68 is_ibss = (wl->bss_type == BSS_TYPE_IBSS); 75 is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
69 if (((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) || 76 if (((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
70 (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) && 77 (is_ibss && !test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))) &&
71 !test_bit(wl->dev_role_id, wl->roc_map)) { 78 !test_bit(wlvif->dev_role_id, wl->roc_map)) {
72 /* restore remain on channel */ 79 /* restore remain on channel */
73 wl12xx_cmd_role_start_dev(wl); 80 wl12xx_start_dev(wl, wlvif);
74 wl12xx_roc(wl, wl->dev_role_id);
75 } 81 }
76 wl1271_ps_elp_sleep(wl); 82 wl1271_ps_elp_sleep(wl);
77 83
@@ -155,9 +161,11 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
155 161
156#define WL1271_NOTHING_TO_SCAN 1 162#define WL1271_NOTHING_TO_SCAN 1
157 163
158static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band, 164static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
159 bool passive, u32 basic_rate) 165 enum ieee80211_band band,
166 bool passive, u32 basic_rate)
160{ 167{
168 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
161 struct wl1271_cmd_scan *cmd; 169 struct wl1271_cmd_scan *cmd;
162 struct wl1271_cmd_trigger_scan_to *trigger; 170 struct wl1271_cmd_trigger_scan_to *trigger;
163 int ret; 171 int ret;
@@ -177,11 +185,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
177 if (passive) 185 if (passive)
178 scan_options |= WL1271_SCAN_OPT_PASSIVE; 186 scan_options |= WL1271_SCAN_OPT_PASSIVE;
179 187
180 if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) { 188 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) {
181 ret = -EINVAL; 189 ret = -EINVAL;
182 goto out; 190 goto out;
183 } 191 }
184 cmd->params.role_id = wl->role_id; 192 cmd->params.role_id = wlvif->role_id;
185 cmd->params.scan_options = cpu_to_le16(scan_options); 193 cmd->params.scan_options = cpu_to_le16(scan_options);
186 194
187 cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req, 195 cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -194,7 +202,6 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
194 202
195 cmd->params.tx_rate = cpu_to_le32(basic_rate); 203 cmd->params.tx_rate = cpu_to_le32(basic_rate);
196 cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs; 204 cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
197 cmd->params.tx_rate = cpu_to_le32(basic_rate);
198 cmd->params.tid_trigger = 0; 205 cmd->params.tid_trigger = 0;
199 cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; 206 cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
200 207
@@ -208,11 +215,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
208 memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len); 215 memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
209 } 216 }
210 217
211 memcpy(cmd->addr, wl->mac_addr, ETH_ALEN); 218 memcpy(cmd->addr, vif->addr, ETH_ALEN);
212 219
213 ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len, 220 ret = wl1271_cmd_build_probe_req(wl, wlvif, wl->scan.ssid,
214 wl->scan.req->ie, wl->scan.req->ie_len, 221 wl->scan.ssid_len, wl->scan.req->ie,
215 band); 222 wl->scan.req->ie_len, band);
216 if (ret < 0) { 223 if (ret < 0) {
217 wl1271_error("PROBE request template failed"); 224 wl1271_error("PROBE request template failed");
218 goto out; 225 goto out;
@@ -241,11 +248,12 @@ out:
241 return ret; 248 return ret;
242} 249}
243 250
244void wl1271_scan_stm(struct wl1271 *wl) 251void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
245{ 252{
253 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
246 int ret = 0; 254 int ret = 0;
247 enum ieee80211_band band; 255 enum ieee80211_band band;
248 u32 rate; 256 u32 rate, mask;
249 257
250 switch (wl->scan.state) { 258 switch (wl->scan.state) {
251 case WL1271_SCAN_STATE_IDLE: 259 case WL1271_SCAN_STATE_IDLE:
@@ -253,47 +261,59 @@ void wl1271_scan_stm(struct wl1271 *wl)
253 261
254 case WL1271_SCAN_STATE_2GHZ_ACTIVE: 262 case WL1271_SCAN_STATE_2GHZ_ACTIVE:
255 band = IEEE80211_BAND_2GHZ; 263 band = IEEE80211_BAND_2GHZ;
256 rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); 264 mask = wlvif->bitrate_masks[band];
257 ret = wl1271_scan_send(wl, band, false, rate); 265 if (wl->scan.req->no_cck) {
266 mask &= ~CONF_TX_CCK_RATES;
267 if (!mask)
268 mask = CONF_TX_RATE_MASK_BASIC_P2P;
269 }
270 rate = wl1271_tx_min_rate_get(wl, mask);
271 ret = wl1271_scan_send(wl, vif, band, false, rate);
258 if (ret == WL1271_NOTHING_TO_SCAN) { 272 if (ret == WL1271_NOTHING_TO_SCAN) {
259 wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE; 273 wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
260 wl1271_scan_stm(wl); 274 wl1271_scan_stm(wl, vif);
261 } 275 }
262 276
263 break; 277 break;
264 278
265 case WL1271_SCAN_STATE_2GHZ_PASSIVE: 279 case WL1271_SCAN_STATE_2GHZ_PASSIVE:
266 band = IEEE80211_BAND_2GHZ; 280 band = IEEE80211_BAND_2GHZ;
267 rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); 281 mask = wlvif->bitrate_masks[band];
268 ret = wl1271_scan_send(wl, band, true, rate); 282 if (wl->scan.req->no_cck) {
283 mask &= ~CONF_TX_CCK_RATES;
284 if (!mask)
285 mask = CONF_TX_RATE_MASK_BASIC_P2P;
286 }
287 rate = wl1271_tx_min_rate_get(wl, mask);
288 ret = wl1271_scan_send(wl, vif, band, true, rate);
269 if (ret == WL1271_NOTHING_TO_SCAN) { 289 if (ret == WL1271_NOTHING_TO_SCAN) {
270 if (wl->enable_11a) 290 if (wl->enable_11a)
271 wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE; 291 wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
272 else 292 else
273 wl->scan.state = WL1271_SCAN_STATE_DONE; 293 wl->scan.state = WL1271_SCAN_STATE_DONE;
274 wl1271_scan_stm(wl); 294 wl1271_scan_stm(wl, vif);
275 } 295 }
276 296
277 break; 297 break;
278 298
279 case WL1271_SCAN_STATE_5GHZ_ACTIVE: 299 case WL1271_SCAN_STATE_5GHZ_ACTIVE:
280 band = IEEE80211_BAND_5GHZ; 300 band = IEEE80211_BAND_5GHZ;
281 rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); 301 rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
282 ret = wl1271_scan_send(wl, band, false, rate); 302 ret = wl1271_scan_send(wl, vif, band, false, rate);
283 if (ret == WL1271_NOTHING_TO_SCAN) { 303 if (ret == WL1271_NOTHING_TO_SCAN) {
284 wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE; 304 wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
285 wl1271_scan_stm(wl); 305 wl1271_scan_stm(wl, vif);
286 } 306 }
287 307
288 break; 308 break;
289 309
290 case WL1271_SCAN_STATE_5GHZ_PASSIVE: 310 case WL1271_SCAN_STATE_5GHZ_PASSIVE:
291 band = IEEE80211_BAND_5GHZ; 311 band = IEEE80211_BAND_5GHZ;
292 rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]); 312 rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
293 ret = wl1271_scan_send(wl, band, true, rate); 313 ret = wl1271_scan_send(wl, vif, band, true, rate);
294 if (ret == WL1271_NOTHING_TO_SCAN) { 314 if (ret == WL1271_NOTHING_TO_SCAN) {
295 wl->scan.state = WL1271_SCAN_STATE_DONE; 315 wl->scan.state = WL1271_SCAN_STATE_DONE;
296 wl1271_scan_stm(wl); 316 wl1271_scan_stm(wl, vif);
297 } 317 }
298 318
299 break; 319 break;
@@ -317,7 +337,8 @@ void wl1271_scan_stm(struct wl1271 *wl)
317 } 337 }
318} 338}
319 339
320int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, 340int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
341 const u8 *ssid, size_t ssid_len,
321 struct cfg80211_scan_request *req) 342 struct cfg80211_scan_request *req)
322{ 343{
323 /* 344 /*
@@ -338,6 +359,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
338 wl->scan.ssid_len = 0; 359 wl->scan.ssid_len = 0;
339 } 360 }
340 361
362 wl->scan_vif = vif;
341 wl->scan.req = req; 363 wl->scan.req = req;
342 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); 364 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
343 365
@@ -346,7 +368,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
346 ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work, 368 ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
347 msecs_to_jiffies(WL1271_SCAN_TIMEOUT)); 369 msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
348 370
349 wl1271_scan_stm(wl); 371 wl1271_scan_stm(wl, vif);
350 372
351 return 0; 373 return 0;
352} 374}
@@ -550,6 +572,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
550 * so they're used in probe requests. 572 * so they're used in probe requests.
551 */ 573 */
552 for (i = 0; i < req->n_ssids; i++) { 574 for (i = 0; i < req->n_ssids; i++) {
575 if (!req->ssids[i].ssid_len)
576 continue;
577
553 for (j = 0; j < cmd->n_ssids; j++) 578 for (j = 0; j < cmd->n_ssids; j++)
554 if (!memcmp(req->ssids[i].ssid, 579 if (!memcmp(req->ssids[i].ssid,
555 cmd->ssids[j].ssid, 580 cmd->ssids[j].ssid,
@@ -585,6 +610,7 @@ out:
585} 610}
586 611
587int wl1271_scan_sched_scan_config(struct wl1271 *wl, 612int wl1271_scan_sched_scan_config(struct wl1271 *wl,
613 struct wl12xx_vif *wlvif,
588 struct cfg80211_sched_scan_request *req, 614 struct cfg80211_sched_scan_request *req,
589 struct ieee80211_sched_scan_ies *ies) 615 struct ieee80211_sched_scan_ies *ies)
590{ 616{
@@ -631,7 +657,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
631 } 657 }
632 658
633 if (!force_passive && cfg->active[0]) { 659 if (!force_passive && cfg->active[0]) {
634 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, 660 ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
635 req->ssids[0].ssid_len, 661 req->ssids[0].ssid_len,
636 ies->ie[IEEE80211_BAND_2GHZ], 662 ies->ie[IEEE80211_BAND_2GHZ],
637 ies->len[IEEE80211_BAND_2GHZ], 663 ies->len[IEEE80211_BAND_2GHZ],
@@ -643,7 +669,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
643 } 669 }
644 670
645 if (!force_passive && cfg->active[1]) { 671 if (!force_passive && cfg->active[1]) {
646 ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid, 672 ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
647 req->ssids[0].ssid_len, 673 req->ssids[0].ssid_len,
648 ies->ie[IEEE80211_BAND_5GHZ], 674 ies->ie[IEEE80211_BAND_5GHZ],
649 ies->len[IEEE80211_BAND_5GHZ], 675 ies->len[IEEE80211_BAND_5GHZ],
@@ -667,14 +693,14 @@ out:
667 return ret; 693 return ret;
668} 694}
669 695
670int wl1271_scan_sched_scan_start(struct wl1271 *wl) 696int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
671{ 697{
672 struct wl1271_cmd_sched_scan_start *start; 698 struct wl1271_cmd_sched_scan_start *start;
673 int ret = 0; 699 int ret = 0;
674 700
675 wl1271_debug(DEBUG_CMD, "cmd periodic scan start"); 701 wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
676 702
677 if (wl->bss_type != BSS_TYPE_STA_BSS) 703 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
678 return -EOPNOTSUPP; 704 return -EOPNOTSUPP;
679 705
680 if (!test_bit(WL1271_FLAG_IDLE, &wl->flags)) 706 if (!test_bit(WL1271_FLAG_IDLE, &wl->flags))
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index 92115156522f..a7ed43dc08c9 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -26,18 +26,20 @@
26 26
27#include "wl12xx.h" 27#include "wl12xx.h"
28 28
29int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, 29int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
30 const u8 *ssid, size_t ssid_len,
30 struct cfg80211_scan_request *req); 31 struct cfg80211_scan_request *req);
31int wl1271_scan_stop(struct wl1271 *wl); 32int wl1271_scan_stop(struct wl1271 *wl);
32int wl1271_scan_build_probe_req(struct wl1271 *wl, 33int wl1271_scan_build_probe_req(struct wl1271 *wl,
33 const u8 *ssid, size_t ssid_len, 34 const u8 *ssid, size_t ssid_len,
34 const u8 *ie, size_t ie_len, u8 band); 35 const u8 *ie, size_t ie_len, u8 band);
35void wl1271_scan_stm(struct wl1271 *wl); 36void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif);
36void wl1271_scan_complete_work(struct work_struct *work); 37void wl1271_scan_complete_work(struct work_struct *work);
37int wl1271_scan_sched_scan_config(struct wl1271 *wl, 38int wl1271_scan_sched_scan_config(struct wl1271 *wl,
39 struct wl12xx_vif *wlvif,
38 struct cfg80211_sched_scan_request *req, 40 struct cfg80211_sched_scan_request *req,
39 struct ieee80211_sched_scan_ies *ies); 41 struct ieee80211_sched_scan_ies *ies);
40int wl1271_scan_sched_scan_start(struct wl1271 *wl); 42int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
41void wl1271_scan_sched_scan_stop(struct wl1271 *wl); 43void wl1271_scan_sched_scan_stop(struct wl1271 *wl);
42void wl1271_scan_sched_scan_results(struct wl1271 *wl); 44void wl1271_scan_sched_scan_results(struct wl1271 *wl);
43 45
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 516a8980723c..468a50553fac 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -24,6 +24,7 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/platform_device.h>
27#include <linux/mmc/sdio_func.h> 28#include <linux/mmc/sdio_func.h>
28#include <linux/mmc/sdio_ids.h> 29#include <linux/mmc/sdio_ids.h>
29#include <linux/mmc/card.h> 30#include <linux/mmc/card.h>
@@ -44,107 +45,67 @@
44#define SDIO_DEVICE_ID_TI_WL1271 0x4076 45#define SDIO_DEVICE_ID_TI_WL1271 0x4076
45#endif 46#endif
46 47
48struct wl12xx_sdio_glue {
49 struct device *dev;
50 struct platform_device *core;
51};
52
47static const struct sdio_device_id wl1271_devices[] __devinitconst = { 53static const struct sdio_device_id wl1271_devices[] __devinitconst = {
48 { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) }, 54 { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
49 {} 55 {}
50}; 56};
51MODULE_DEVICE_TABLE(sdio, wl1271_devices); 57MODULE_DEVICE_TABLE(sdio, wl1271_devices);
52 58
53static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz) 59static void wl1271_sdio_set_block_size(struct device *child,
54{ 60 unsigned int blksz)
55 sdio_claim_host(wl->if_priv);
56 sdio_set_block_size(wl->if_priv, blksz);
57 sdio_release_host(wl->if_priv);
58}
59
60static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
61{
62 return wl->if_priv;
63}
64
65static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
66{
67 return &(wl_to_func(wl)->dev);
68}
69
70static irqreturn_t wl1271_hardirq(int irq, void *cookie)
71{ 61{
72 struct wl1271 *wl = cookie; 62 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
73 unsigned long flags; 63 struct sdio_func *func = dev_to_sdio_func(glue->dev);
74 64
75 wl1271_debug(DEBUG_IRQ, "IRQ"); 65 sdio_claim_host(func);
76 66 sdio_set_block_size(func, blksz);
77 /* complete the ELP completion */ 67 sdio_release_host(func);
78 spin_lock_irqsave(&wl->wl_lock, flags);
79 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
80 if (wl->elp_compl) {
81 complete(wl->elp_compl);
82 wl->elp_compl = NULL;
83 }
84
85 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
86 /* don't enqueue a work right now. mark it as pending */
87 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
88 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
89 disable_irq_nosync(wl->irq);
90 pm_wakeup_event(wl1271_sdio_wl_to_dev(wl), 0);
91 spin_unlock_irqrestore(&wl->wl_lock, flags);
92 return IRQ_HANDLED;
93 }
94 spin_unlock_irqrestore(&wl->wl_lock, flags);
95
96 return IRQ_WAKE_THREAD;
97}
98
99static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
100{
101 disable_irq(wl->irq);
102}
103
104static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
105{
106 enable_irq(wl->irq);
107} 68}
108 69
109static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf, 70static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
110 size_t len, bool fixed) 71 size_t len, bool fixed)
111{ 72{
112 int ret; 73 int ret;
113 struct sdio_func *func = wl_to_func(wl); 74 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
75 struct sdio_func *func = dev_to_sdio_func(glue->dev);
114 76
115 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 77 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
116 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); 78 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
117 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x", 79 dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
118 addr, ((u8 *)buf)[0]); 80 addr, ((u8 *)buf)[0]);
119 } else { 81 } else {
120 if (fixed) 82 if (fixed)
121 ret = sdio_readsb(func, buf, addr, len); 83 ret = sdio_readsb(func, buf, addr, len);
122 else 84 else
123 ret = sdio_memcpy_fromio(func, buf, addr, len); 85 ret = sdio_memcpy_fromio(func, buf, addr, len);
124 86
125 wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes", 87 dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n",
126 addr, len); 88 addr, len);
127 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
128 } 89 }
129 90
130 if (ret) 91 if (ret)
131 wl1271_error("sdio read failed (%d)", ret); 92 dev_err(child->parent, "sdio read failed (%d)\n", ret);
132} 93}
133 94
134static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf, 95static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
135 size_t len, bool fixed) 96 size_t len, bool fixed)
136{ 97{
137 int ret; 98 int ret;
138 struct sdio_func *func = wl_to_func(wl); 99 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
100 struct sdio_func *func = dev_to_sdio_func(glue->dev);
139 101
140 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 102 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
141 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); 103 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
142 wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x", 104 dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
143 addr, ((u8 *)buf)[0]); 105 addr, ((u8 *)buf)[0]);
144 } else { 106 } else {
145 wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes", 107 dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n",
146 addr, len); 108 addr, len);
147 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
148 109
149 if (fixed) 110 if (fixed)
150 ret = sdio_writesb(func, addr, buf, len); 111 ret = sdio_writesb(func, addr, buf, len);
@@ -153,13 +114,13 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
153 } 114 }
154 115
155 if (ret) 116 if (ret)
156 wl1271_error("sdio write failed (%d)", ret); 117 dev_err(child->parent, "sdio write failed (%d)\n", ret);
157} 118}
158 119
159static int wl1271_sdio_power_on(struct wl1271 *wl) 120static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
160{ 121{
161 struct sdio_func *func = wl_to_func(wl);
162 int ret; 122 int ret;
123 struct sdio_func *func = dev_to_sdio_func(glue->dev);
163 124
164 /* If enabled, tell runtime PM not to power off the card */ 125 /* If enabled, tell runtime PM not to power off the card */
165 if (pm_runtime_enabled(&func->dev)) { 126 if (pm_runtime_enabled(&func->dev)) {
@@ -180,10 +141,10 @@ out:
180 return ret; 141 return ret;
181} 142}
182 143
183static int wl1271_sdio_power_off(struct wl1271 *wl) 144static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
184{ 145{
185 struct sdio_func *func = wl_to_func(wl);
186 int ret; 146 int ret;
147 struct sdio_func *func = dev_to_sdio_func(glue->dev);
187 148
188 sdio_disable_func(func); 149 sdio_disable_func(func);
189 sdio_release_host(func); 150 sdio_release_host(func);
@@ -200,46 +161,43 @@ static int wl1271_sdio_power_off(struct wl1271 *wl)
200 return ret; 161 return ret;
201} 162}
202 163
203static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable) 164static int wl12xx_sdio_set_power(struct device *child, bool enable)
204{ 165{
166 struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
167
205 if (enable) 168 if (enable)
206 return wl1271_sdio_power_on(wl); 169 return wl12xx_sdio_power_on(glue);
207 else 170 else
208 return wl1271_sdio_power_off(wl); 171 return wl12xx_sdio_power_off(glue);
209} 172}
210 173
211static struct wl1271_if_operations sdio_ops = { 174static struct wl1271_if_operations sdio_ops = {
212 .read = wl1271_sdio_raw_read, 175 .read = wl12xx_sdio_raw_read,
213 .write = wl1271_sdio_raw_write, 176 .write = wl12xx_sdio_raw_write,
214 .power = wl1271_sdio_set_power, 177 .power = wl12xx_sdio_set_power,
215 .dev = wl1271_sdio_wl_to_dev,
216 .enable_irq = wl1271_sdio_enable_interrupts,
217 .disable_irq = wl1271_sdio_disable_interrupts,
218 .set_block_size = wl1271_sdio_set_block_size, 178 .set_block_size = wl1271_sdio_set_block_size,
219}; 179};
220 180
221static int __devinit wl1271_probe(struct sdio_func *func, 181static int __devinit wl1271_probe(struct sdio_func *func,
222 const struct sdio_device_id *id) 182 const struct sdio_device_id *id)
223{ 183{
224 struct ieee80211_hw *hw; 184 struct wl12xx_platform_data *wlan_data;
225 const struct wl12xx_platform_data *wlan_data; 185 struct wl12xx_sdio_glue *glue;
226 struct wl1271 *wl; 186 struct resource res[1];
227 unsigned long irqflags;
228 mmc_pm_flag_t mmcflags; 187 mmc_pm_flag_t mmcflags;
229 int ret; 188 int ret = -ENOMEM;
230 189
231 /* We are only able to handle the wlan function */ 190 /* We are only able to handle the wlan function */
232 if (func->num != 0x02) 191 if (func->num != 0x02)
233 return -ENODEV; 192 return -ENODEV;
234 193
235 hw = wl1271_alloc_hw(); 194 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
236 if (IS_ERR(hw)) 195 if (!glue) {
237 return PTR_ERR(hw); 196 dev_err(&func->dev, "can't allocate glue\n");
238 197 goto out;
239 wl = hw->priv; 198 }
240 199
241 wl->if_priv = func; 200 glue->dev = &func->dev;
242 wl->if_ops = &sdio_ops;
243 201
244 /* Grab access to FN0 for ELP reg. */ 202 /* Grab access to FN0 for ELP reg. */
245 func->card->quirks |= MMC_QUIRK_LENIENT_FN0; 203 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
@@ -250,80 +208,79 @@ static int __devinit wl1271_probe(struct sdio_func *func,
250 wlan_data = wl12xx_get_platform_data(); 208 wlan_data = wl12xx_get_platform_data();
251 if (IS_ERR(wlan_data)) { 209 if (IS_ERR(wlan_data)) {
252 ret = PTR_ERR(wlan_data); 210 ret = PTR_ERR(wlan_data);
253 wl1271_error("missing wlan platform data: %d", ret); 211 dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
254 goto out_free; 212 goto out_free_glue;
255 } 213 }
256 214
257 wl->irq = wlan_data->irq; 215 /* if sdio can keep power while host is suspended, enable wow */
258 wl->ref_clock = wlan_data->board_ref_clock; 216 mmcflags = sdio_get_host_pm_caps(func);
259 wl->tcxo_clock = wlan_data->board_tcxo_clock; 217 dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
260 wl->platform_quirks = wlan_data->platform_quirks;
261 218
262 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) 219 if (mmcflags & MMC_PM_KEEP_POWER)
263 irqflags = IRQF_TRIGGER_RISING; 220 wlan_data->pwr_in_suspend = true;
264 else 221
265 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT; 222 wlan_data->ops = &sdio_ops;
266
267 ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
268 irqflags,
269 DRIVER_NAME, wl);
270 if (ret < 0) {
271 wl1271_error("request_irq() failed: %d", ret);
272 goto out_free;
273 }
274 223
275 ret = enable_irq_wake(wl->irq); 224 sdio_set_drvdata(func, glue);
276 if (!ret) {
277 wl->irq_wake_enabled = true;
278 device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1);
279 225
280 /* if sdio can keep power while host is suspended, enable wow */ 226 /* Tell PM core that we don't need the card to be powered now */
281 mmcflags = sdio_get_host_pm_caps(func); 227 pm_runtime_put_noidle(&func->dev);
282 wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags);
283 228
284 if (mmcflags & MMC_PM_KEEP_POWER) 229 glue->core = platform_device_alloc("wl12xx", -1);
285 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; 230 if (!glue->core) {
231 dev_err(glue->dev, "can't allocate platform_device");
232 ret = -ENOMEM;
233 goto out_free_glue;
286 } 234 }
287 disable_irq(wl->irq);
288 235
289 ret = wl1271_init_ieee80211(wl); 236 glue->core->dev.parent = &func->dev;
290 if (ret)
291 goto out_irq;
292 237
293 ret = wl1271_register_hw(wl); 238 memset(res, 0x00, sizeof(res));
294 if (ret)
295 goto out_irq;
296 239
297 sdio_set_drvdata(func, wl); 240 res[0].start = wlan_data->irq;
241 res[0].flags = IORESOURCE_IRQ;
242 res[0].name = "irq";
298 243
299 /* Tell PM core that we don't need the card to be powered now */ 244 ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
300 pm_runtime_put_noidle(&func->dev); 245 if (ret) {
246 dev_err(glue->dev, "can't add resources\n");
247 goto out_dev_put;
248 }
301 249
250 ret = platform_device_add_data(glue->core, wlan_data,
251 sizeof(*wlan_data));
252 if (ret) {
253 dev_err(glue->dev, "can't add platform data\n");
254 goto out_dev_put;
255 }
256
257 ret = platform_device_add(glue->core);
258 if (ret) {
259 dev_err(glue->dev, "can't add platform device\n");
260 goto out_dev_put;
261 }
302 return 0; 262 return 0;
303 263
304 out_irq: 264out_dev_put:
305 free_irq(wl->irq, wl); 265 platform_device_put(glue->core);
306 266
307 out_free: 267out_free_glue:
308 wl1271_free_hw(wl); 268 kfree(glue);
309 269
270out:
310 return ret; 271 return ret;
311} 272}
312 273
313static void __devexit wl1271_remove(struct sdio_func *func) 274static void __devexit wl1271_remove(struct sdio_func *func)
314{ 275{
315 struct wl1271 *wl = sdio_get_drvdata(func); 276 struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
316 277
317 /* Undo decrement done above in wl1271_probe */ 278 /* Undo decrement done above in wl1271_probe */
318 pm_runtime_get_noresume(&func->dev); 279 pm_runtime_get_noresume(&func->dev);
319 280
320 wl1271_unregister_hw(wl); 281 platform_device_del(glue->core);
321 if (wl->irq_wake_enabled) { 282 platform_device_put(glue->core);
322 device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0); 283 kfree(glue);
323 disable_irq_wake(wl->irq);
324 }
325 free_irq(wl->irq, wl);
326 wl1271_free_hw(wl);
327} 284}
328 285
329#ifdef CONFIG_PM 286#ifdef CONFIG_PM
@@ -332,20 +289,21 @@ static int wl1271_suspend(struct device *dev)
332 /* Tell MMC/SDIO core it's OK to power down the card 289 /* Tell MMC/SDIO core it's OK to power down the card
333 * (if it isn't already), but not to remove it completely */ 290 * (if it isn't already), but not to remove it completely */
334 struct sdio_func *func = dev_to_sdio_func(dev); 291 struct sdio_func *func = dev_to_sdio_func(dev);
335 struct wl1271 *wl = sdio_get_drvdata(func); 292 struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
293 struct wl1271 *wl = platform_get_drvdata(glue->core);
336 mmc_pm_flag_t sdio_flags; 294 mmc_pm_flag_t sdio_flags;
337 int ret = 0; 295 int ret = 0;
338 296
339 wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d", 297 dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
340 wl->wow_enabled); 298 wl->wow_enabled);
341 299
342 /* check whether sdio should keep power */ 300 /* check whether sdio should keep power */
343 if (wl->wow_enabled) { 301 if (wl->wow_enabled) {
344 sdio_flags = sdio_get_host_pm_caps(func); 302 sdio_flags = sdio_get_host_pm_caps(func);
345 303
346 if (!(sdio_flags & MMC_PM_KEEP_POWER)) { 304 if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
347 wl1271_error("can't keep power while host " 305 dev_err(dev, "can't keep power while host "
348 "is suspended"); 306 "is suspended\n");
349 ret = -EINVAL; 307 ret = -EINVAL;
350 goto out; 308 goto out;
351 } 309 }
@@ -353,7 +311,7 @@ static int wl1271_suspend(struct device *dev)
353 /* keep power while host suspended */ 311 /* keep power while host suspended */
354 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); 312 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
355 if (ret) { 313 if (ret) {
356 wl1271_error("error while trying to keep power"); 314 dev_err(dev, "error while trying to keep power\n");
357 goto out; 315 goto out;
358 } 316 }
359 317
@@ -367,9 +325,10 @@ out:
367static int wl1271_resume(struct device *dev) 325static int wl1271_resume(struct device *dev)
368{ 326{
369 struct sdio_func *func = dev_to_sdio_func(dev); 327 struct sdio_func *func = dev_to_sdio_func(dev);
370 struct wl1271 *wl = sdio_get_drvdata(func); 328 struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
329 struct wl1271 *wl = platform_get_drvdata(glue->core);
371 330
372 wl1271_debug(DEBUG_MAC80211, "wl1271 resume"); 331 dev_dbg(dev, "wl1271 resume\n");
373 if (wl->wow_enabled) { 332 if (wl->wow_enabled) {
374 /* claim back host */ 333 /* claim back host */
375 sdio_claim_host(func); 334 sdio_claim_host(func);
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
deleted file mode 100644
index f25d5d9212e7..000000000000
--- a/drivers/net/wireless/wl12xx/sdio_test.c
+++ /dev/null
@@ -1,543 +0,0 @@
1/*
2 * SDIO testing driver for wl12xx
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Roger Quadros <roger.quadros@nokia.com>
7 *
8 * wl12xx read/write routines taken from the main module
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * 02110-1301 USA
23 *
24 */
25
26#include <linux/irq.h>
27#include <linux/module.h>
28#include <linux/crc7.h>
29#include <linux/vmalloc.h>
30#include <linux/mmc/sdio_func.h>
31#include <linux/mmc/sdio_ids.h>
32#include <linux/mmc/card.h>
33#include <linux/mmc/host.h>
34#include <linux/gpio.h>
35#include <linux/wl12xx.h>
36#include <linux/kthread.h>
37#include <linux/firmware.h>
38#include <linux/pm_runtime.h>
39
40#include "wl12xx.h"
41#include "io.h"
42#include "boot.h"
43
44#ifndef SDIO_VENDOR_ID_TI
45#define SDIO_VENDOR_ID_TI 0x0097
46#endif
47
48#ifndef SDIO_DEVICE_ID_TI_WL1271
49#define SDIO_DEVICE_ID_TI_WL1271 0x4076
50#endif
51
52static bool rx, tx;
53
54module_param(rx, bool, S_IRUGO | S_IWUSR);
55MODULE_PARM_DESC(rx, "Perform rx test. Default (0). "
56 "This test continuously reads data from the SDIO device.\n");
57
58module_param(tx, bool, S_IRUGO | S_IWUSR);
59MODULE_PARM_DESC(tx, "Perform tx test. Default (0). "
60 "This test continuously writes data to the SDIO device.\n");
61
62struct wl1271_test {
63 struct wl1271 wl;
64 struct task_struct *test_task;
65};
66
67static const struct sdio_device_id wl1271_devices[] = {
68 { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
69 {}
70};
71
72static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
73{
74 return wl->if_priv;
75}
76
77static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
78{
79 return &(wl_to_func(wl)->dev);
80}
81
82static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
83 size_t len, bool fixed)
84{
85 int ret = 0;
86 struct sdio_func *func = wl_to_func(wl);
87
88 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
89 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
90 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
91 addr, ((u8 *)buf)[0]);
92 } else {
93 if (fixed)
94 ret = sdio_readsb(func, buf, addr, len);
95 else
96 ret = sdio_memcpy_fromio(func, buf, addr, len);
97
98 wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
99 addr, len);
100 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
101 }
102
103 if (ret)
104 wl1271_error("sdio read failed (%d)", ret);
105}
106
107static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
108 size_t len, bool fixed)
109{
110 int ret = 0;
111 struct sdio_func *func = wl_to_func(wl);
112
113 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
114 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
115 wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
116 addr, ((u8 *)buf)[0]);
117 } else {
118 wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
119 addr, len);
120 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
121
122 if (fixed)
123 ret = sdio_writesb(func, addr, buf, len);
124 else
125 ret = sdio_memcpy_toio(func, addr, buf, len);
126 }
127 if (ret)
128 wl1271_error("sdio write failed (%d)", ret);
129
130}
131
132static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
133{
134 struct sdio_func *func = wl_to_func(wl);
135 int ret;
136
137 /* Let the SDIO stack handle wlan_enable control, so we
138 * keep host claimed while wlan is in use to keep wl1271
139 * alive.
140 */
141 if (enable) {
142 /* Power up the card */
143 ret = pm_runtime_get_sync(&func->dev);
144 if (ret < 0)
145 goto out;
146
147 /* Runtime PM might be disabled, power up the card manually */
148 ret = mmc_power_restore_host(func->card->host);
149 if (ret < 0)
150 goto out;
151
152 sdio_claim_host(func);
153 sdio_enable_func(func);
154 } else {
155 sdio_disable_func(func);
156 sdio_release_host(func);
157
158 /* Runtime PM might be disabled, power off the card manually */
159 ret = mmc_power_save_host(func->card->host);
160 if (ret < 0)
161 goto out;
162
163 /* Power down the card */
164 ret = pm_runtime_put_sync(&func->dev);
165 }
166
167out:
168 return ret;
169}
170
171static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
172{
173}
174
175static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
176{
177}
178
179
180static struct wl1271_if_operations sdio_ops = {
181 .read = wl1271_sdio_raw_read,
182 .write = wl1271_sdio_raw_write,
183 .power = wl1271_sdio_set_power,
184 .dev = wl1271_sdio_wl_to_dev,
185 .enable_irq = wl1271_sdio_enable_interrupts,
186 .disable_irq = wl1271_sdio_disable_interrupts,
187};
188
189static void wl1271_fw_wakeup(struct wl1271 *wl)
190{
191 u32 elp_reg;
192
193 elp_reg = ELPCTRL_WAKE_UP;
194 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
195}
196
197static int wl1271_fetch_firmware(struct wl1271 *wl)
198{
199 const struct firmware *fw;
200 int ret;
201
202 if (wl->chip.id == CHIP_ID_1283_PG20)
203 ret = request_firmware(&fw, WL128X_FW_NAME,
204 wl1271_wl_to_dev(wl));
205 else
206 ret = request_firmware(&fw, WL127X_FW_NAME,
207 wl1271_wl_to_dev(wl));
208
209 if (ret < 0) {
210 wl1271_error("could not get firmware: %d", ret);
211 return ret;
212 }
213
214 if (fw->size % 4) {
215 wl1271_error("firmware size is not multiple of 32 bits: %zu",
216 fw->size);
217 ret = -EILSEQ;
218 goto out;
219 }
220
221 wl->fw_len = fw->size;
222 wl->fw = vmalloc(wl->fw_len);
223
224 if (!wl->fw) {
225 wl1271_error("could not allocate memory for the firmware");
226 ret = -ENOMEM;
227 goto out;
228 }
229
230 memcpy(wl->fw, fw->data, wl->fw_len);
231
232 ret = 0;
233
234out:
235 release_firmware(fw);
236
237 return ret;
238}
239
240static int wl1271_fetch_nvs(struct wl1271 *wl)
241{
242 const struct firmware *fw;
243 int ret;
244
245 ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
246
247 if (ret < 0) {
248 wl1271_error("could not get nvs file: %d", ret);
249 return ret;
250 }
251
252 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
253
254 if (!wl->nvs) {
255 wl1271_error("could not allocate memory for the nvs file");
256 ret = -ENOMEM;
257 goto out;
258 }
259
260 wl->nvs_len = fw->size;
261
262out:
263 release_firmware(fw);
264
265 return ret;
266}
267
268static int wl1271_chip_wakeup(struct wl1271 *wl)
269{
270 struct wl1271_partition_set partition;
271 int ret;
272
273 msleep(WL1271_PRE_POWER_ON_SLEEP);
274 ret = wl1271_power_on(wl);
275 if (ret)
276 return ret;
277
278 msleep(WL1271_POWER_ON_SLEEP);
279
280 /* We don't need a real memory partition here, because we only want
281 * to use the registers at this point. */
282 memset(&partition, 0, sizeof(partition));
283 partition.reg.start = REGISTERS_BASE;
284 partition.reg.size = REGISTERS_DOWN_SIZE;
285 wl1271_set_partition(wl, &partition);
286
287 /* ELP module wake up */
288 wl1271_fw_wakeup(wl);
289
290 /* whal_FwCtrl_BootSm() */
291
292 /* 0. read chip id from CHIP_ID */
293 wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
294
295 /* 1. check if chip id is valid */
296
297 switch (wl->chip.id) {
298 case CHIP_ID_1271_PG10:
299 wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
300 wl->chip.id);
301 break;
302 case CHIP_ID_1271_PG20:
303 wl1271_notice("chip id 0x%x (1271 PG20)",
304 wl->chip.id);
305 break;
306 case CHIP_ID_1283_PG20:
307 wl1271_notice("chip id 0x%x (1283 PG20)",
308 wl->chip.id);
309 break;
310 case CHIP_ID_1283_PG10:
311 default:
312 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
313 return -ENODEV;
314 }
315
316 return ret;
317}
318
319static struct wl1271_partition_set part_down = {
320 .mem = {
321 .start = 0x00000000,
322 .size = 0x000177c0
323 },
324 .reg = {
325 .start = REGISTERS_BASE,
326 .size = 0x00008800
327 },
328 .mem2 = {
329 .start = 0x00000000,
330 .size = 0x00000000
331 },
332 .mem3 = {
333 .start = 0x00000000,
334 .size = 0x00000000
335 },
336};
337
338static int tester(void *data)
339{
340 struct wl1271 *wl = data;
341 struct sdio_func *func = wl_to_func(wl);
342 struct device *pdev = &func->dev;
343 int ret = 0;
344 bool rx_started = 0;
345 bool tx_started = 0;
346 uint8_t *tx_buf, *rx_buf;
347 int test_size = PAGE_SIZE;
348 u32 addr = 0;
349 struct wl1271_partition_set partition;
350
351 /* We assume chip is powered up and firmware fetched */
352
353 memcpy(&partition, &part_down, sizeof(partition));
354 partition.mem.start = addr;
355 wl1271_set_partition(wl, &partition);
356
357 tx_buf = kmalloc(test_size, GFP_KERNEL);
358 rx_buf = kmalloc(test_size, GFP_KERNEL);
359 if (!tx_buf || !rx_buf) {
360 dev_err(pdev,
361 "Could not allocate memory. Test will not run.\n");
362 ret = -ENOMEM;
363 goto free;
364 }
365
366 memset(tx_buf, 0x5a, test_size);
367
368 /* write something in data area so we can read it back */
369 wl1271_write(wl, addr, tx_buf, test_size, false);
370
371 while (!kthread_should_stop()) {
372 if (rx && !rx_started) {
373 dev_info(pdev, "starting rx test\n");
374 rx_started = 1;
375 } else if (!rx && rx_started) {
376 dev_info(pdev, "stopping rx test\n");
377 rx_started = 0;
378 }
379
380 if (tx && !tx_started) {
381 dev_info(pdev, "starting tx test\n");
382 tx_started = 1;
383 } else if (!tx && tx_started) {
384 dev_info(pdev, "stopping tx test\n");
385 tx_started = 0;
386 }
387
388 if (rx_started)
389 wl1271_read(wl, addr, rx_buf, test_size, false);
390
391 if (tx_started)
392 wl1271_write(wl, addr, tx_buf, test_size, false);
393
394 if (!rx_started && !tx_started)
395 msleep(100);
396 }
397
398free:
399 kfree(tx_buf);
400 kfree(rx_buf);
401 return ret;
402}
403
404static int __devinit wl1271_probe(struct sdio_func *func,
405 const struct sdio_device_id *id)
406{
407 const struct wl12xx_platform_data *wlan_data;
408 struct wl1271 *wl;
409 struct wl1271_test *wl_test;
410 int ret = 0;
411
412 /* wl1271 has 2 sdio functions we handle just the wlan part */
413 if (func->num != 0x02)
414 return -ENODEV;
415
416 wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL);
417 if (!wl_test) {
418 dev_err(&func->dev, "Could not allocate memory\n");
419 return -ENOMEM;
420 }
421
422 wl = &wl_test->wl;
423
424 wl->if_priv = func;
425 wl->if_ops = &sdio_ops;
426
427 /* Grab access to FN0 for ELP reg. */
428 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
429
430 /* Use block mode for transferring over one block size of data */
431 func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
432
433 wlan_data = wl12xx_get_platform_data();
434 if (IS_ERR(wlan_data)) {
435 ret = PTR_ERR(wlan_data);
436 dev_err(&func->dev, "missing wlan platform data: %d\n", ret);
437 goto out_free;
438 }
439
440 wl->irq = wlan_data->irq;
441 wl->ref_clock = wlan_data->board_ref_clock;
442 wl->tcxo_clock = wlan_data->board_tcxo_clock;
443
444 sdio_set_drvdata(func, wl_test);
445
446 /* power up the device */
447 ret = wl1271_chip_wakeup(wl);
448 if (ret) {
449 dev_err(&func->dev, "could not wake up chip\n");
450 goto out_free;
451 }
452
453 if (wl->fw == NULL) {
454 ret = wl1271_fetch_firmware(wl);
455 if (ret < 0) {
456 dev_err(&func->dev, "firmware fetch error\n");
457 goto out_off;
458 }
459 }
460
461 /* fetch NVS */
462 if (wl->nvs == NULL) {
463 ret = wl1271_fetch_nvs(wl);
464 if (ret < 0) {
465 dev_err(&func->dev, "NVS fetch error\n");
466 goto out_off;
467 }
468 }
469
470 ret = wl1271_load_firmware(wl);
471 if (ret < 0) {
472 dev_err(&func->dev, "firmware load error: %d\n", ret);
473 goto out_free;
474 }
475
476 dev_info(&func->dev, "initialized\n");
477
478 /* I/O testing will be done in the tester thread */
479
480 wl_test->test_task = kthread_run(tester, wl, "sdio_tester");
481 if (IS_ERR(wl_test->test_task)) {
482 dev_err(&func->dev, "unable to create kernel thread\n");
483 ret = PTR_ERR(wl_test->test_task);
484 goto out_free;
485 }
486
487 return 0;
488
489out_off:
490 /* power off the chip */
491 wl1271_power_off(wl);
492
493out_free:
494 kfree(wl_test);
495 return ret;
496}
497
498static void __devexit wl1271_remove(struct sdio_func *func)
499{
500 struct wl1271_test *wl_test = sdio_get_drvdata(func);
501
502 /* stop the I/O test thread */
503 kthread_stop(wl_test->test_task);
504
505 /* power off the chip */
506 wl1271_power_off(&wl_test->wl);
507
508 vfree(wl_test->wl.fw);
509 wl_test->wl.fw = NULL;
510 kfree(wl_test->wl.nvs);
511 wl_test->wl.nvs = NULL;
512
513 kfree(wl_test);
514}
515
516static struct sdio_driver wl1271_sdio_driver = {
517 .name = "wl12xx_sdio_test",
518 .id_table = wl1271_devices,
519 .probe = wl1271_probe,
520 .remove = __devexit_p(wl1271_remove),
521};
522
523static int __init wl1271_init(void)
524{
525 int ret;
526
527 ret = sdio_register_driver(&wl1271_sdio_driver);
528 if (ret < 0)
529 pr_err("failed to register sdio driver: %d\n", ret);
530
531 return ret;
532}
533module_init(wl1271_init);
534
535static void __exit wl1271_exit(void)
536{
537 sdio_unregister_driver(&wl1271_sdio_driver);
538}
539module_exit(wl1271_exit);
540
541MODULE_LICENSE("GPL");
542MODULE_AUTHOR("Roger Quadros <roger.quadros@nokia.com>");
543
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 0f9718677860..92caa7ce6053 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -27,6 +27,7 @@
27#include <linux/crc7.h> 27#include <linux/crc7.h>
28#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
29#include <linux/wl12xx.h> 29#include <linux/wl12xx.h>
30#include <linux/platform_device.h>
30#include <linux/slab.h> 31#include <linux/slab.h>
31 32
32#include "wl12xx.h" 33#include "wl12xx.h"
@@ -69,35 +70,22 @@
69 70
70#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) 71#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
71 72
72static inline struct spi_device *wl_to_spi(struct wl1271 *wl) 73struct wl12xx_spi_glue {
73{ 74 struct device *dev;
74 return wl->if_priv; 75 struct platform_device *core;
75} 76};
76
77static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
78{
79 return &(wl_to_spi(wl)->dev);
80}
81
82static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
83{
84 disable_irq(wl->irq);
85}
86
87static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
88{
89 enable_irq(wl->irq);
90}
91 77
92static void wl1271_spi_reset(struct wl1271 *wl) 78static void wl12xx_spi_reset(struct device *child)
93{ 79{
80 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
94 u8 *cmd; 81 u8 *cmd;
95 struct spi_transfer t; 82 struct spi_transfer t;
96 struct spi_message m; 83 struct spi_message m;
97 84
98 cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); 85 cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
99 if (!cmd) { 86 if (!cmd) {
100 wl1271_error("could not allocate cmd for spi reset"); 87 dev_err(child->parent,
88 "could not allocate cmd for spi reset\n");
101 return; 89 return;
102 } 90 }
103 91
@@ -110,21 +98,22 @@ static void wl1271_spi_reset(struct wl1271 *wl)
110 t.len = WSPI_INIT_CMD_LEN; 98 t.len = WSPI_INIT_CMD_LEN;
111 spi_message_add_tail(&t, &m); 99 spi_message_add_tail(&t, &m);
112 100
113 spi_sync(wl_to_spi(wl), &m); 101 spi_sync(to_spi_device(glue->dev), &m);
114 102
115 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
116 kfree(cmd); 103 kfree(cmd);
117} 104}
118 105
119static void wl1271_spi_init(struct wl1271 *wl) 106static void wl12xx_spi_init(struct device *child)
120{ 107{
108 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
121 u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd; 109 u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
122 struct spi_transfer t; 110 struct spi_transfer t;
123 struct spi_message m; 111 struct spi_message m;
124 112
125 cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); 113 cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
126 if (!cmd) { 114 if (!cmd) {
127 wl1271_error("could not allocate cmd for spi init"); 115 dev_err(child->parent,
116 "could not allocate cmd for spi init\n");
128 return; 117 return;
129 } 118 }
130 119
@@ -165,15 +154,16 @@ static void wl1271_spi_init(struct wl1271 *wl)
165 t.len = WSPI_INIT_CMD_LEN; 154 t.len = WSPI_INIT_CMD_LEN;
166 spi_message_add_tail(&t, &m); 155 spi_message_add_tail(&t, &m);
167 156
168 spi_sync(wl_to_spi(wl), &m); 157 spi_sync(to_spi_device(glue->dev), &m);
169 wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
170 kfree(cmd); 158 kfree(cmd);
171} 159}
172 160
173#define WL1271_BUSY_WORD_TIMEOUT 1000 161#define WL1271_BUSY_WORD_TIMEOUT 1000
174 162
175static int wl1271_spi_read_busy(struct wl1271 *wl) 163static int wl12xx_spi_read_busy(struct device *child)
176{ 164{
165 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
166 struct wl1271 *wl = dev_get_drvdata(child);
177 struct spi_transfer t[1]; 167 struct spi_transfer t[1];
178 struct spi_message m; 168 struct spi_message m;
179 u32 *busy_buf; 169 u32 *busy_buf;
@@ -194,20 +184,22 @@ static int wl1271_spi_read_busy(struct wl1271 *wl)
194 t[0].len = sizeof(u32); 184 t[0].len = sizeof(u32);
195 t[0].cs_change = true; 185 t[0].cs_change = true;
196 spi_message_add_tail(&t[0], &m); 186 spi_message_add_tail(&t[0], &m);
197 spi_sync(wl_to_spi(wl), &m); 187 spi_sync(to_spi_device(glue->dev), &m);
198 188
199 if (*busy_buf & 0x1) 189 if (*busy_buf & 0x1)
200 return 0; 190 return 0;
201 } 191 }
202 192
203 /* The SPI bus is unresponsive, the read failed. */ 193 /* The SPI bus is unresponsive, the read failed. */
204 wl1271_error("SPI read busy-word timeout!\n"); 194 dev_err(child->parent, "SPI read busy-word timeout!\n");
205 return -ETIMEDOUT; 195 return -ETIMEDOUT;
206} 196}
207 197
208static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, 198static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
209 size_t len, bool fixed) 199 size_t len, bool fixed)
210{ 200{
201 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
202 struct wl1271 *wl = dev_get_drvdata(child);
211 struct spi_transfer t[2]; 203 struct spi_transfer t[2];
212 struct spi_message m; 204 struct spi_message m;
213 u32 *busy_buf; 205 u32 *busy_buf;
@@ -243,10 +235,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
243 t[1].cs_change = true; 235 t[1].cs_change = true;
244 spi_message_add_tail(&t[1], &m); 236 spi_message_add_tail(&t[1], &m);
245 237
246 spi_sync(wl_to_spi(wl), &m); 238 spi_sync(to_spi_device(glue->dev), &m);
247 239
248 if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) && 240 if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
249 wl1271_spi_read_busy(wl)) { 241 wl12xx_spi_read_busy(child)) {
250 memset(buf, 0, chunk_len); 242 memset(buf, 0, chunk_len);
251 return; 243 return;
252 } 244 }
@@ -259,10 +251,7 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
259 t[0].cs_change = true; 251 t[0].cs_change = true;
260 spi_message_add_tail(&t[0], &m); 252 spi_message_add_tail(&t[0], &m);
261 253
262 spi_sync(wl_to_spi(wl), &m); 254 spi_sync(to_spi_device(glue->dev), &m);
263
264 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
265 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len);
266 255
267 if (!fixed) 256 if (!fixed)
268 addr += chunk_len; 257 addr += chunk_len;
@@ -271,9 +260,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
271 } 260 }
272} 261}
273 262
274static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, 263static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
275 size_t len, bool fixed) 264 size_t len, bool fixed)
276{ 265{
266 struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
277 struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS]; 267 struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
278 struct spi_message m; 268 struct spi_message m;
279 u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; 269 u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
@@ -308,9 +298,6 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
308 t[i].len = chunk_len; 298 t[i].len = chunk_len;
309 spi_message_add_tail(&t[i++], &m); 299 spi_message_add_tail(&t[i++], &m);
310 300
311 wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
312 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len);
313
314 if (!fixed) 301 if (!fixed)
315 addr += chunk_len; 302 addr += chunk_len;
316 buf += chunk_len; 303 buf += chunk_len;
@@ -318,72 +305,41 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
318 cmd++; 305 cmd++;
319 } 306 }
320 307
321 spi_sync(wl_to_spi(wl), &m); 308 spi_sync(to_spi_device(glue->dev), &m);
322}
323
324static irqreturn_t wl1271_hardirq(int irq, void *cookie)
325{
326 struct wl1271 *wl = cookie;
327 unsigned long flags;
328
329 wl1271_debug(DEBUG_IRQ, "IRQ");
330
331 /* complete the ELP completion */
332 spin_lock_irqsave(&wl->wl_lock, flags);
333 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
334 if (wl->elp_compl) {
335 complete(wl->elp_compl);
336 wl->elp_compl = NULL;
337 }
338 spin_unlock_irqrestore(&wl->wl_lock, flags);
339
340 return IRQ_WAKE_THREAD;
341}
342
343static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
344{
345 if (wl->set_power)
346 wl->set_power(enable);
347
348 return 0;
349} 309}
350 310
351static struct wl1271_if_operations spi_ops = { 311static struct wl1271_if_operations spi_ops = {
352 .read = wl1271_spi_raw_read, 312 .read = wl12xx_spi_raw_read,
353 .write = wl1271_spi_raw_write, 313 .write = wl12xx_spi_raw_write,
354 .reset = wl1271_spi_reset, 314 .reset = wl12xx_spi_reset,
355 .init = wl1271_spi_init, 315 .init = wl12xx_spi_init,
356 .power = wl1271_spi_set_power,
357 .dev = wl1271_spi_wl_to_dev,
358 .enable_irq = wl1271_spi_enable_interrupts,
359 .disable_irq = wl1271_spi_disable_interrupts,
360 .set_block_size = NULL, 316 .set_block_size = NULL,
361}; 317};
362 318
363static int __devinit wl1271_probe(struct spi_device *spi) 319static int __devinit wl1271_probe(struct spi_device *spi)
364{ 320{
321 struct wl12xx_spi_glue *glue;
365 struct wl12xx_platform_data *pdata; 322 struct wl12xx_platform_data *pdata;
366 struct ieee80211_hw *hw; 323 struct resource res[1];
367 struct wl1271 *wl; 324 int ret = -ENOMEM;
368 unsigned long irqflags;
369 int ret;
370 325
371 pdata = spi->dev.platform_data; 326 pdata = spi->dev.platform_data;
372 if (!pdata) { 327 if (!pdata) {
373 wl1271_error("no platform data"); 328 dev_err(&spi->dev, "no platform data\n");
374 return -ENODEV; 329 return -ENODEV;
375 } 330 }
376 331
377 hw = wl1271_alloc_hw(); 332 pdata->ops = &spi_ops;
378 if (IS_ERR(hw))
379 return PTR_ERR(hw);
380 333
381 wl = hw->priv; 334 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
335 if (!glue) {
336 dev_err(&spi->dev, "can't allocate glue\n");
337 goto out;
338 }
382 339
383 dev_set_drvdata(&spi->dev, wl); 340 glue->dev = &spi->dev;
384 wl->if_priv = spi;
385 341
386 wl->if_ops = &spi_ops; 342 spi_set_drvdata(spi, glue);
387 343
388 /* This is the only SPI value that we need to set here, the rest 344 /* This is the only SPI value that we need to set here, the rest
389 * comes from the board-peripherals file */ 345 * comes from the board-peripherals file */
@@ -391,69 +347,61 @@ static int __devinit wl1271_probe(struct spi_device *spi)
391 347
392 ret = spi_setup(spi); 348 ret = spi_setup(spi);
393 if (ret < 0) { 349 if (ret < 0) {
394 wl1271_error("spi_setup failed"); 350 dev_err(glue->dev, "spi_setup failed\n");
395 goto out_free; 351 goto out_free_glue;
396 } 352 }
397 353
398 wl->set_power = pdata->set_power; 354 glue->core = platform_device_alloc("wl12xx", -1);
399 if (!wl->set_power) { 355 if (!glue->core) {
400 wl1271_error("set power function missing in platform data"); 356 dev_err(glue->dev, "can't allocate platform_device\n");
401 ret = -ENODEV; 357 ret = -ENOMEM;
402 goto out_free; 358 goto out_free_glue;
403 } 359 }
404 360
405 wl->ref_clock = pdata->board_ref_clock; 361 glue->core->dev.parent = &spi->dev;
406 wl->tcxo_clock = pdata->board_tcxo_clock;
407 wl->platform_quirks = pdata->platform_quirks;
408 362
409 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) 363 memset(res, 0x00, sizeof(res));
410 irqflags = IRQF_TRIGGER_RISING;
411 else
412 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
413 364
414 wl->irq = spi->irq; 365 res[0].start = spi->irq;
415 if (wl->irq < 0) { 366 res[0].flags = IORESOURCE_IRQ;
416 wl1271_error("irq missing in platform data"); 367 res[0].name = "irq";
417 ret = -ENODEV;
418 goto out_free;
419 }
420 368
421 ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq, 369 ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
422 irqflags, 370 if (ret) {
423 DRIVER_NAME, wl); 371 dev_err(glue->dev, "can't add resources\n");
424 if (ret < 0) { 372 goto out_dev_put;
425 wl1271_error("request_irq() failed: %d", ret);
426 goto out_free;
427 } 373 }
428 374
429 disable_irq(wl->irq); 375 ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
430 376 if (ret) {
431 ret = wl1271_init_ieee80211(wl); 377 dev_err(glue->dev, "can't add platform data\n");
432 if (ret) 378 goto out_dev_put;
433 goto out_irq; 379 }
434 380
435 ret = wl1271_register_hw(wl); 381 ret = platform_device_add(glue->core);
436 if (ret) 382 if (ret) {
437 goto out_irq; 383 dev_err(glue->dev, "can't register platform device\n");
384 goto out_dev_put;
385 }
438 386
439 return 0; 387 return 0;
440 388
441 out_irq: 389out_dev_put:
442 free_irq(wl->irq, wl); 390 platform_device_put(glue->core);
443
444 out_free:
445 wl1271_free_hw(wl);
446 391
392out_free_glue:
393 kfree(glue);
394out:
447 return ret; 395 return ret;
448} 396}
449 397
450static int __devexit wl1271_remove(struct spi_device *spi) 398static int __devexit wl1271_remove(struct spi_device *spi)
451{ 399{
452 struct wl1271 *wl = dev_get_drvdata(&spi->dev); 400 struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
453 401
454 wl1271_unregister_hw(wl); 402 platform_device_del(glue->core);
455 free_irq(wl->irq, wl); 403 platform_device_put(glue->core);
456 wl1271_free_hw(wl); 404 kfree(glue);
457 405
458 return 0; 406 return 0;
459} 407}
@@ -462,7 +410,6 @@ static int __devexit wl1271_remove(struct spi_device *spi)
462static struct spi_driver wl1271_spi_driver = { 410static struct spi_driver wl1271_spi_driver = {
463 .driver = { 411 .driver = {
464 .name = "wl1271_spi", 412 .name = "wl1271_spi",
465 .bus = &spi_bus_type,
466 .owner = THIS_MODULE, 413 .owner = THIS_MODULE,
467 }, 414 },
468 415
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 4ae8effaee22..25093c0cb0ed 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -26,8 +26,10 @@
26#include <net/genetlink.h> 26#include <net/genetlink.h>
27 27
28#include "wl12xx.h" 28#include "wl12xx.h"
29#include "debug.h"
29#include "acx.h" 30#include "acx.h"
30#include "reg.h" 31#include "reg.h"
32#include "ps.h"
31 33
32#define WL1271_TM_MAX_DATA_LENGTH 1024 34#define WL1271_TM_MAX_DATA_LENGTH 1024
33 35
@@ -36,6 +38,7 @@ enum wl1271_tm_commands {
36 WL1271_TM_CMD_TEST, 38 WL1271_TM_CMD_TEST,
37 WL1271_TM_CMD_INTERROGATE, 39 WL1271_TM_CMD_INTERROGATE,
38 WL1271_TM_CMD_CONFIGURE, 40 WL1271_TM_CMD_CONFIGURE,
41 WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */
39 WL1271_TM_CMD_SET_PLT_MODE, 42 WL1271_TM_CMD_SET_PLT_MODE,
40 WL1271_TM_CMD_RECOVER, 43 WL1271_TM_CMD_RECOVER,
41 44
@@ -87,31 +90,47 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
87 return -EMSGSIZE; 90 return -EMSGSIZE;
88 91
89 mutex_lock(&wl->mutex); 92 mutex_lock(&wl->mutex);
90 ret = wl1271_cmd_test(wl, buf, buf_len, answer);
91 mutex_unlock(&wl->mutex);
92 93
94 if (wl->state == WL1271_STATE_OFF) {
95 ret = -EINVAL;
96 goto out;
97 }
98
99 ret = wl1271_ps_elp_wakeup(wl);
100 if (ret < 0)
101 goto out;
102
103 ret = wl1271_cmd_test(wl, buf, buf_len, answer);
93 if (ret < 0) { 104 if (ret < 0) {
94 wl1271_warning("testmode cmd test failed: %d", ret); 105 wl1271_warning("testmode cmd test failed: %d", ret);
95 return ret; 106 goto out_sleep;
96 } 107 }
97 108
98 if (answer) { 109 if (answer) {
99 len = nla_total_size(buf_len); 110 len = nla_total_size(buf_len);
100 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len); 111 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
101 if (!skb) 112 if (!skb) {
102 return -ENOMEM; 113 ret = -ENOMEM;
114 goto out_sleep;
115 }
103 116
104 NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf); 117 NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
105 ret = cfg80211_testmode_reply(skb); 118 ret = cfg80211_testmode_reply(skb);
106 if (ret < 0) 119 if (ret < 0)
107 return ret; 120 goto out_sleep;
108 } 121 }
109 122
110 return 0; 123out_sleep:
124 wl1271_ps_elp_sleep(wl);
125out:
126 mutex_unlock(&wl->mutex);
127
128 return ret;
111 129
112nla_put_failure: 130nla_put_failure:
113 kfree_skb(skb); 131 kfree_skb(skb);
114 return -EMSGSIZE; 132 ret = -EMSGSIZE;
133 goto out_sleep;
115} 134}
116 135
117static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) 136static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
@@ -128,33 +147,53 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
128 147
129 ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]); 148 ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
130 149
150 mutex_lock(&wl->mutex);
151
152 if (wl->state == WL1271_STATE_OFF) {
153 ret = -EINVAL;
154 goto out;
155 }
156
157 ret = wl1271_ps_elp_wakeup(wl);
158 if (ret < 0)
159 goto out;
160
131 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 161 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
132 if (!cmd) 162 if (!cmd) {
133 return -ENOMEM; 163 ret = -ENOMEM;
164 goto out_sleep;
165 }
134 166
135 mutex_lock(&wl->mutex);
136 ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd)); 167 ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
137 mutex_unlock(&wl->mutex);
138
139 if (ret < 0) { 168 if (ret < 0) {
140 wl1271_warning("testmode cmd interrogate failed: %d", ret); 169 wl1271_warning("testmode cmd interrogate failed: %d", ret);
141 kfree(cmd); 170 goto out_free;
142 return ret;
143 } 171 }
144 172
145 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); 173 skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
146 if (!skb) { 174 if (!skb) {
147 kfree(cmd); 175 ret = -ENOMEM;
148 return -ENOMEM; 176 goto out_free;
149 } 177 }
150 178
151 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); 179 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
180 ret = cfg80211_testmode_reply(skb);
181 if (ret < 0)
182 goto out_free;
183
184out_free:
185 kfree(cmd);
186out_sleep:
187 wl1271_ps_elp_sleep(wl);
188out:
189 mutex_unlock(&wl->mutex);
152 190
153 return 0; 191 return ret;
154 192
155nla_put_failure: 193nla_put_failure:
156 kfree_skb(skb); 194 kfree_skb(skb);
157 return -EMSGSIZE; 195 ret = -EMSGSIZE;
196 goto out_free;
158} 197}
159 198
160static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[]) 199static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index bad9e29d49b0..4508ccd78328 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -26,22 +26,24 @@
26#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
27 27
28#include "wl12xx.h" 28#include "wl12xx.h"
29#include "debug.h"
29#include "io.h" 30#include "io.h"
30#include "reg.h" 31#include "reg.h"
31#include "ps.h" 32#include "ps.h"
32#include "tx.h" 33#include "tx.h"
33#include "event.h" 34#include "event.h"
34 35
35static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id) 36static int wl1271_set_default_wep_key(struct wl1271 *wl,
37 struct wl12xx_vif *wlvif, u8 id)
36{ 38{
37 int ret; 39 int ret;
38 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); 40 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
39 41
40 if (is_ap) 42 if (is_ap)
41 ret = wl12xx_cmd_set_default_wep_key(wl, id, 43 ret = wl12xx_cmd_set_default_wep_key(wl, id,
42 wl->ap_bcast_hlid); 44 wlvif->ap.bcast_hlid);
43 else 45 else
44 ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid); 46 ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
45 47
46 if (ret < 0) 48 if (ret < 0)
47 return ret; 49 return ret;
@@ -76,7 +78,8 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
76} 78}
77 79
78static int wl1271_tx_update_filters(struct wl1271 *wl, 80static int wl1271_tx_update_filters(struct wl1271 *wl,
79 struct sk_buff *skb) 81 struct wl12xx_vif *wlvif,
82 struct sk_buff *skb)
80{ 83{
81 struct ieee80211_hdr *hdr; 84 struct ieee80211_hdr *hdr;
82 int ret; 85 int ret;
@@ -92,15 +95,11 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
92 if (!ieee80211_is_auth(hdr->frame_control)) 95 if (!ieee80211_is_auth(hdr->frame_control))
93 return 0; 96 return 0;
94 97
95 if (wl->dev_hlid != WL12XX_INVALID_LINK_ID) 98 if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID)
96 goto out; 99 goto out;
97 100
98 wl1271_debug(DEBUG_CMD, "starting device role for roaming"); 101 wl1271_debug(DEBUG_CMD, "starting device role for roaming");
99 ret = wl12xx_cmd_role_start_dev(wl); 102 ret = wl12xx_start_dev(wl, wlvif);
100 if (ret < 0)
101 goto out;
102
103 ret = wl12xx_roc(wl, wl->dev_role_id);
104 if (ret < 0) 103 if (ret < 0)
105 goto out; 104 goto out;
106out: 105out:
@@ -123,18 +122,16 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
123 wl1271_acx_set_inconnection_sta(wl, hdr->addr1); 122 wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
124} 123}
125 124
126static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid) 125static void wl1271_tx_regulate_link(struct wl1271 *wl,
126 struct wl12xx_vif *wlvif,
127 u8 hlid)
127{ 128{
128 bool fw_ps, single_sta; 129 bool fw_ps, single_sta;
129 u8 tx_pkts; 130 u8 tx_pkts;
130 131
131 /* only regulate station links */ 132 if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
132 if (hlid < WL1271_AP_STA_HLID_START)
133 return; 133 return;
134 134
135 if (WARN_ON(!wl1271_is_active_sta(wl, hlid)))
136 return;
137
138 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); 135 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
139 tx_pkts = wl->links[hlid].allocated_pkts; 136 tx_pkts = wl->links[hlid].allocated_pkts;
140 single_sta = (wl->active_sta_count == 1); 137 single_sta = (wl->active_sta_count == 1);
@@ -146,7 +143,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
146 * case FW-memory congestion is not a problem. 143 * case FW-memory congestion is not a problem.
147 */ 144 */
148 if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS) 145 if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
149 wl1271_ps_link_start(wl, hlid, true); 146 wl12xx_ps_link_start(wl, wlvif, hlid, true);
150} 147}
151 148
152bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb) 149bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
@@ -154,7 +151,8 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
154 return wl->dummy_packet == skb; 151 return wl->dummy_packet == skb;
155} 152}
156 153
157u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb) 154u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
155 struct sk_buff *skb)
158{ 156{
159 struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); 157 struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
160 158
@@ -167,49 +165,51 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
167 } else { 165 } else {
168 struct ieee80211_hdr *hdr; 166 struct ieee80211_hdr *hdr;
169 167
170 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) 168 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
171 return wl->system_hlid; 169 return wl->system_hlid;
172 170
173 hdr = (struct ieee80211_hdr *)skb->data; 171 hdr = (struct ieee80211_hdr *)skb->data;
174 if (ieee80211_is_mgmt(hdr->frame_control)) 172 if (ieee80211_is_mgmt(hdr->frame_control))
175 return wl->ap_global_hlid; 173 return wlvif->ap.global_hlid;
176 else 174 else
177 return wl->ap_bcast_hlid; 175 return wlvif->ap.bcast_hlid;
178 } 176 }
179} 177}
180 178
181static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb) 179u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
180 struct sk_buff *skb)
182{ 181{
183 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 182 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
184 183
185 if (wl12xx_is_dummy_packet(wl, skb)) 184 if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
186 return wl->system_hlid; 185 return wl->system_hlid;
187 186
188 if (wl->bss_type == BSS_TYPE_AP_BSS) 187 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
189 return wl12xx_tx_get_hlid_ap(wl, skb); 188 return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
190 189
191 wl1271_tx_update_filters(wl, skb); 190 wl1271_tx_update_filters(wl, wlvif, skb);
192 191
193 if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) || 192 if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
194 test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) && 193 test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
195 !ieee80211_is_auth(hdr->frame_control) && 194 !ieee80211_is_auth(hdr->frame_control) &&
196 !ieee80211_is_assoc_req(hdr->frame_control)) 195 !ieee80211_is_assoc_req(hdr->frame_control))
197 return wl->sta_hlid; 196 return wlvif->sta.hlid;
198 else 197 else
199 return wl->dev_hlid; 198 return wlvif->dev_hlid;
200} 199}
201 200
202static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl, 201static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
203 unsigned int packet_length) 202 unsigned int packet_length)
204{ 203{
205 if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT) 204 if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
206 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
207 else
208 return ALIGN(packet_length, WL1271_TX_ALIGN_TO); 205 return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
206 else
207 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
209} 208}
210 209
211static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, 210static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
212 u32 buf_offset, u8 hlid) 211 struct sk_buff *skb, u32 extra, u32 buf_offset,
212 u8 hlid)
213{ 213{
214 struct wl1271_tx_hw_descr *desc; 214 struct wl1271_tx_hw_descr *desc;
215 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 215 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -217,6 +217,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
217 u32 total_blocks; 217 u32 total_blocks;
218 int id, ret = -EBUSY, ac; 218 int id, ret = -EBUSY, ac;
219 u32 spare_blocks = wl->tx_spare_blocks; 219 u32 spare_blocks = wl->tx_spare_blocks;
220 bool is_dummy = false;
220 221
221 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) 222 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
222 return -EAGAIN; 223 return -EAGAIN;
@@ -231,8 +232,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
231 len = wl12xx_calc_packet_alignment(wl, total_len); 232 len = wl12xx_calc_packet_alignment(wl, total_len);
232 233
233 /* in case of a dummy packet, use default amount of spare mem blocks */ 234 /* in case of a dummy packet, use default amount of spare mem blocks */
234 if (unlikely(wl12xx_is_dummy_packet(wl, skb))) 235 if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
236 is_dummy = true;
235 spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; 237 spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
238 }
236 239
237 total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE + 240 total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
238 spare_blocks; 241 spare_blocks;
@@ -257,8 +260,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
257 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 260 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
258 wl->tx_allocated_pkts[ac]++; 261 wl->tx_allocated_pkts[ac]++;
259 262
260 if (wl->bss_type == BSS_TYPE_AP_BSS && 263 if (!is_dummy && wlvif &&
261 hlid >= WL1271_AP_STA_HLID_START) 264 wlvif->bss_type == BSS_TYPE_AP_BSS &&
265 test_bit(hlid, wlvif->ap.sta_hlid_map))
262 wl->links[hlid].allocated_pkts++; 266 wl->links[hlid].allocated_pkts++;
263 267
264 ret = 0; 268 ret = 0;
@@ -273,15 +277,16 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
273 return ret; 277 return ret;
274} 278}
275 279
276static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, 280static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
277 u32 extra, struct ieee80211_tx_info *control, 281 struct sk_buff *skb, u32 extra,
278 u8 hlid) 282 struct ieee80211_tx_info *control, u8 hlid)
279{ 283{
280 struct timespec ts; 284 struct timespec ts;
281 struct wl1271_tx_hw_descr *desc; 285 struct wl1271_tx_hw_descr *desc;
282 int aligned_len, ac, rate_idx; 286 int aligned_len, ac, rate_idx;
283 s64 hosttime; 287 s64 hosttime;
284 u16 tx_attr; 288 u16 tx_attr = 0;
289 bool is_dummy;
285 290
286 desc = (struct wl1271_tx_hw_descr *) skb->data; 291 desc = (struct wl1271_tx_hw_descr *) skb->data;
287 292
@@ -298,7 +303,8 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
298 hosttime = (timespec_to_ns(&ts) >> 10); 303 hosttime = (timespec_to_ns(&ts) >> 10);
299 desc->start_time = cpu_to_le32(hosttime - wl->time_offset); 304 desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
300 305
301 if (wl->bss_type != BSS_TYPE_AP_BSS) 306 is_dummy = wl12xx_is_dummy_packet(wl, skb);
307 if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
302 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); 308 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
303 else 309 else
304 desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU); 310 desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
@@ -307,39 +313,42 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
307 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 313 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
308 desc->tid = skb->priority; 314 desc->tid = skb->priority;
309 315
310 if (wl12xx_is_dummy_packet(wl, skb)) { 316 if (is_dummy) {
311 /* 317 /*
312 * FW expects the dummy packet to have an invalid session id - 318 * FW expects the dummy packet to have an invalid session id -
313 * any session id that is different than the one set in the join 319 * any session id that is different than the one set in the join
314 */ 320 */
315 tx_attr = ((~wl->session_counter) << 321 tx_attr = (SESSION_COUNTER_INVALID <<
316 TX_HW_ATTR_OFST_SESSION_COUNTER) & 322 TX_HW_ATTR_OFST_SESSION_COUNTER) &
317 TX_HW_ATTR_SESSION_COUNTER; 323 TX_HW_ATTR_SESSION_COUNTER;
318 324
319 tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ; 325 tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
320 } else { 326 } else if (wlvif) {
321 /* configure the tx attributes */ 327 /* configure the tx attributes */
322 tx_attr = 328 tx_attr = wlvif->session_counter <<
323 wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; 329 TX_HW_ATTR_OFST_SESSION_COUNTER;
324 } 330 }
325 331
326 desc->hlid = hlid; 332 desc->hlid = hlid;
327 333 if (is_dummy || !wlvif)
328 if (wl->bss_type != BSS_TYPE_AP_BSS) { 334 rate_idx = 0;
335 else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
329 /* if the packets are destined for AP (have a STA entry) 336 /* if the packets are destined for AP (have a STA entry)
330 send them with AP rate policies, otherwise use default 337 send them with AP rate policies, otherwise use default
331 basic rates */ 338 basic rates */
332 if (control->control.sta) 339 if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
333 rate_idx = ACX_TX_AP_FULL_RATE; 340 rate_idx = wlvif->sta.p2p_rate_idx;
341 else if (control->control.sta)
342 rate_idx = wlvif->sta.ap_rate_idx;
334 else 343 else
335 rate_idx = ACX_TX_BASIC_RATE; 344 rate_idx = wlvif->sta.basic_rate_idx;
336 } else { 345 } else {
337 if (hlid == wl->ap_global_hlid) 346 if (hlid == wlvif->ap.global_hlid)
338 rate_idx = ACX_TX_AP_MODE_MGMT_RATE; 347 rate_idx = wlvif->ap.mgmt_rate_idx;
339 else if (hlid == wl->ap_bcast_hlid) 348 else if (hlid == wlvif->ap.bcast_hlid)
340 rate_idx = ACX_TX_AP_MODE_BCST_RATE; 349 rate_idx = wlvif->ap.bcast_rate_idx;
341 else 350 else
342 rate_idx = ac; 351 rate_idx = wlvif->ap.ucast_rate_idx[ac];
343 } 352 }
344 353
345 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; 354 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
@@ -379,20 +388,24 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
379} 388}
380 389
381/* caller must hold wl->mutex */ 390/* caller must hold wl->mutex */
382static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb, 391static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
383 u32 buf_offset) 392 struct sk_buff *skb, u32 buf_offset)
384{ 393{
385 struct ieee80211_tx_info *info; 394 struct ieee80211_tx_info *info;
386 u32 extra = 0; 395 u32 extra = 0;
387 int ret = 0; 396 int ret = 0;
388 u32 total_len; 397 u32 total_len;
389 u8 hlid; 398 u8 hlid;
399 bool is_dummy;
390 400
391 if (!skb) 401 if (!skb)
392 return -EINVAL; 402 return -EINVAL;
393 403
394 info = IEEE80211_SKB_CB(skb); 404 info = IEEE80211_SKB_CB(skb);
395 405
406 /* TODO: handle dummy packets on multi-vifs */
407 is_dummy = wl12xx_is_dummy_packet(wl, skb);
408
396 if (info->control.hw_key && 409 if (info->control.hw_key &&
397 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) 410 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
398 extra = WL1271_TKIP_IV_SPACE; 411 extra = WL1271_TKIP_IV_SPACE;
@@ -405,29 +418,28 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
405 is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) || 418 is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
406 (cipher == WLAN_CIPHER_SUITE_WEP104); 419 (cipher == WLAN_CIPHER_SUITE_WEP104);
407 420
408 if (unlikely(is_wep && wl->default_key != idx)) { 421 if (unlikely(is_wep && wlvif->default_key != idx)) {
409 ret = wl1271_set_default_wep_key(wl, idx); 422 ret = wl1271_set_default_wep_key(wl, wlvif, idx);
410 if (ret < 0) 423 if (ret < 0)
411 return ret; 424 return ret;
412 wl->default_key = idx; 425 wlvif->default_key = idx;
413 } 426 }
414 } 427 }
415 428 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
416 hlid = wl1271_tx_get_hlid(wl, skb);
417 if (hlid == WL12XX_INVALID_LINK_ID) { 429 if (hlid == WL12XX_INVALID_LINK_ID) {
418 wl1271_error("invalid hlid. dropping skb 0x%p", skb); 430 wl1271_error("invalid hlid. dropping skb 0x%p", skb);
419 return -EINVAL; 431 return -EINVAL;
420 } 432 }
421 433
422 ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid); 434 ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
423 if (ret < 0) 435 if (ret < 0)
424 return ret; 436 return ret;
425 437
426 wl1271_tx_fill_hdr(wl, skb, extra, info, hlid); 438 wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
427 439
428 if (wl->bss_type == BSS_TYPE_AP_BSS) { 440 if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
429 wl1271_tx_ap_update_inconnection_sta(wl, skb); 441 wl1271_tx_ap_update_inconnection_sta(wl, skb);
430 wl1271_tx_regulate_link(wl, hlid); 442 wl1271_tx_regulate_link(wl, wlvif, hlid);
431 } 443 }
432 444
433 /* 445 /*
@@ -444,7 +456,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
444 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); 456 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
445 457
446 /* Revert side effects in the dummy packet skb, so it can be reused */ 458 /* Revert side effects in the dummy packet skb, so it can be reused */
447 if (wl12xx_is_dummy_packet(wl, skb)) 459 if (is_dummy)
448 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 460 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
449 461
450 return total_len; 462 return total_len;
@@ -522,19 +534,18 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
522 return &queues[q]; 534 return &queues[q];
523} 535}
524 536
525static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl) 537static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
538 struct wl1271_link *lnk)
526{ 539{
527 struct sk_buff *skb = NULL; 540 struct sk_buff *skb;
528 unsigned long flags; 541 unsigned long flags;
529 struct sk_buff_head *queue; 542 struct sk_buff_head *queue;
530 543
531 queue = wl1271_select_queue(wl, wl->tx_queue); 544 queue = wl1271_select_queue(wl, lnk->tx_queue);
532 if (!queue) 545 if (!queue)
533 goto out; 546 return NULL;
534 547
535 skb = skb_dequeue(queue); 548 skb = skb_dequeue(queue);
536
537out:
538 if (skb) { 549 if (skb) {
539 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 550 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
540 spin_lock_irqsave(&wl->wl_lock, flags); 551 spin_lock_irqsave(&wl->wl_lock, flags);
@@ -545,43 +556,33 @@ out:
545 return skb; 556 return skb;
546} 557}
547 558
548static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl) 559static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
560 struct wl12xx_vif *wlvif)
549{ 561{
550 struct sk_buff *skb = NULL; 562 struct sk_buff *skb = NULL;
551 unsigned long flags;
552 int i, h, start_hlid; 563 int i, h, start_hlid;
553 struct sk_buff_head *queue;
554 564
555 /* start from the link after the last one */ 565 /* start from the link after the last one */
556 start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS; 566 start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
557 567
558 /* dequeue according to AC, round robin on each link */ 568 /* dequeue according to AC, round robin on each link */
559 for (i = 0; i < AP_MAX_LINKS; i++) { 569 for (i = 0; i < WL12XX_MAX_LINKS; i++) {
560 h = (start_hlid + i) % AP_MAX_LINKS; 570 h = (start_hlid + i) % WL12XX_MAX_LINKS;
561 571
562 /* only consider connected stations */ 572 /* only consider connected stations */
563 if (h >= WL1271_AP_STA_HLID_START && 573 if (!test_bit(h, wlvif->links_map))
564 !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
565 continue; 574 continue;
566 575
567 queue = wl1271_select_queue(wl, wl->links[h].tx_queue); 576 skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
568 if (!queue) 577 if (!skb)
569 continue; 578 continue;
570 579
571 skb = skb_dequeue(queue); 580 wlvif->last_tx_hlid = h;
572 if (skb) 581 break;
573 break;
574 } 582 }
575 583
576 if (skb) { 584 if (!skb)
577 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 585 wlvif->last_tx_hlid = 0;
578 wl->last_tx_hlid = h;
579 spin_lock_irqsave(&wl->wl_lock, flags);
580 wl->tx_queue_count[q]--;
581 spin_unlock_irqrestore(&wl->wl_lock, flags);
582 } else {
583 wl->last_tx_hlid = 0;
584 }
585 586
586 return skb; 587 return skb;
587} 588}
@@ -589,12 +590,32 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
589static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) 590static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
590{ 591{
591 unsigned long flags; 592 unsigned long flags;
593 struct wl12xx_vif *wlvif = wl->last_wlvif;
592 struct sk_buff *skb = NULL; 594 struct sk_buff *skb = NULL;
593 595
594 if (wl->bss_type == BSS_TYPE_AP_BSS) 596 if (wlvif) {
595 skb = wl1271_ap_skb_dequeue(wl); 597 wl12xx_for_each_wlvif_continue(wl, wlvif) {
596 else 598 skb = wl12xx_vif_skb_dequeue(wl, wlvif);
597 skb = wl1271_sta_skb_dequeue(wl); 599 if (skb) {
600 wl->last_wlvif = wlvif;
601 break;
602 }
603 }
604 }
605
606 /* do another pass */
607 if (!skb) {
608 wl12xx_for_each_wlvif(wl, wlvif) {
609 skb = wl12xx_vif_skb_dequeue(wl, wlvif);
610 if (skb) {
611 wl->last_wlvif = wlvif;
612 break;
613 }
614 }
615 }
616
617 if (!skb)
618 skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
598 619
599 if (!skb && 620 if (!skb &&
600 test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) { 621 test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
@@ -610,21 +631,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
610 return skb; 631 return skb;
611} 632}
612 633
613static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb) 634static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
635 struct sk_buff *skb)
614{ 636{
615 unsigned long flags; 637 unsigned long flags;
616 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 638 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
617 639
618 if (wl12xx_is_dummy_packet(wl, skb)) { 640 if (wl12xx_is_dummy_packet(wl, skb)) {
619 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); 641 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
620 } else if (wl->bss_type == BSS_TYPE_AP_BSS) { 642 } else {
621 u8 hlid = wl1271_tx_get_hlid(wl, skb); 643 u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
622 skb_queue_head(&wl->links[hlid].tx_queue[q], skb); 644 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
623 645
624 /* make sure we dequeue the same packet next time */ 646 /* make sure we dequeue the same packet next time */
625 wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS; 647 wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
626 } else { 648 WL12XX_MAX_LINKS;
627 skb_queue_head(&wl->tx_queue[q], skb);
628 } 649 }
629 650
630 spin_lock_irqsave(&wl->wl_lock, flags); 651 spin_lock_irqsave(&wl->wl_lock, flags);
@@ -639,29 +660,71 @@ static bool wl1271_tx_is_data_present(struct sk_buff *skb)
639 return ieee80211_is_data_present(hdr->frame_control); 660 return ieee80211_is_data_present(hdr->frame_control);
640} 661}
641 662
663void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
664{
665 struct wl12xx_vif *wlvif;
666 u32 timeout;
667 u8 hlid;
668
669 if (!wl->conf.rx_streaming.interval)
670 return;
671
672 if (!wl->conf.rx_streaming.always &&
673 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
674 return;
675
676 timeout = wl->conf.rx_streaming.duration;
677 wl12xx_for_each_wlvif_sta(wl, wlvif) {
678 bool found = false;
679 for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
680 if (test_bit(hlid, wlvif->links_map)) {
681 found = true;
682 break;
683 }
684 }
685
686 if (!found)
687 continue;
688
689 /* enable rx streaming */
690 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
691 ieee80211_queue_work(wl->hw,
692 &wlvif->rx_streaming_enable_work);
693
694 mod_timer(&wlvif->rx_streaming_timer,
695 jiffies + msecs_to_jiffies(timeout));
696 }
697}
698
642void wl1271_tx_work_locked(struct wl1271 *wl) 699void wl1271_tx_work_locked(struct wl1271 *wl)
643{ 700{
701 struct wl12xx_vif *wlvif;
644 struct sk_buff *skb; 702 struct sk_buff *skb;
703 struct wl1271_tx_hw_descr *desc;
645 u32 buf_offset = 0; 704 u32 buf_offset = 0;
646 bool sent_packets = false; 705 bool sent_packets = false;
647 bool had_data = false; 706 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
648 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
649 int ret; 707 int ret;
650 708
651 if (unlikely(wl->state == WL1271_STATE_OFF)) 709 if (unlikely(wl->state == WL1271_STATE_OFF))
652 return; 710 return;
653 711
654 while ((skb = wl1271_skb_dequeue(wl))) { 712 while ((skb = wl1271_skb_dequeue(wl))) {
655 if (wl1271_tx_is_data_present(skb)) 713 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
656 had_data = true; 714 bool has_data = false;
657 715
658 ret = wl1271_prepare_tx_frame(wl, skb, buf_offset); 716 wlvif = NULL;
717 if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
718 wlvif = wl12xx_vif_to_data(info->control.vif);
719
720 has_data = wlvif && wl1271_tx_is_data_present(skb);
721 ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
659 if (ret == -EAGAIN) { 722 if (ret == -EAGAIN) {
660 /* 723 /*
661 * Aggregation buffer is full. 724 * Aggregation buffer is full.
662 * Flush buffer and try again. 725 * Flush buffer and try again.
663 */ 726 */
664 wl1271_skb_queue_head(wl, skb); 727 wl1271_skb_queue_head(wl, wlvif, skb);
665 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, 728 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
666 buf_offset, true); 729 buf_offset, true);
667 sent_packets = true; 730 sent_packets = true;
@@ -672,16 +735,27 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
672 * Firmware buffer is full. 735 * Firmware buffer is full.
673 * Queue back last skb, and stop aggregating. 736 * Queue back last skb, and stop aggregating.
674 */ 737 */
675 wl1271_skb_queue_head(wl, skb); 738 wl1271_skb_queue_head(wl, wlvif, skb);
676 /* No work left, avoid scheduling redundant tx work */ 739 /* No work left, avoid scheduling redundant tx work */
677 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 740 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
678 goto out_ack; 741 goto out_ack;
679 } else if (ret < 0) { 742 } else if (ret < 0) {
680 dev_kfree_skb(skb); 743 if (wl12xx_is_dummy_packet(wl, skb))
744 /*
745 * fw still expects dummy packet,
746 * so re-enqueue it
747 */
748 wl1271_skb_queue_head(wl, wlvif, skb);
749 else
750 ieee80211_free_txskb(wl->hw, skb);
681 goto out_ack; 751 goto out_ack;
682 } 752 }
683 buf_offset += ret; 753 buf_offset += ret;
684 wl->tx_packets_count++; 754 wl->tx_packets_count++;
755 if (has_data) {
756 desc = (struct wl1271_tx_hw_descr *) skb->data;
757 __set_bit(desc->hlid, active_hlids);
758 }
685 } 759 }
686 760
687out_ack: 761out_ack:
@@ -701,19 +775,7 @@ out_ack:
701 775
702 wl1271_handle_tx_low_watermark(wl); 776 wl1271_handle_tx_low_watermark(wl);
703 } 777 }
704 if (!is_ap && wl->conf.rx_streaming.interval && had_data && 778 wl12xx_rearm_rx_streaming(wl, active_hlids);
705 (wl->conf.rx_streaming.always ||
706 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
707 u32 timeout = wl->conf.rx_streaming.duration;
708
709 /* enable rx streaming */
710 if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
711 ieee80211_queue_work(wl->hw,
712 &wl->rx_streaming_enable_work);
713
714 mod_timer(&wl->rx_streaming_timer,
715 jiffies + msecs_to_jiffies(timeout));
716 }
717} 779}
718 780
719void wl1271_tx_work(struct work_struct *work) 781void wl1271_tx_work(struct work_struct *work)
@@ -737,6 +799,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
737 struct wl1271_tx_hw_res_descr *result) 799 struct wl1271_tx_hw_res_descr *result)
738{ 800{
739 struct ieee80211_tx_info *info; 801 struct ieee80211_tx_info *info;
802 struct ieee80211_vif *vif;
803 struct wl12xx_vif *wlvif;
740 struct sk_buff *skb; 804 struct sk_buff *skb;
741 int id = result->id; 805 int id = result->id;
742 int rate = -1; 806 int rate = -1;
@@ -756,11 +820,16 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
756 return; 820 return;
757 } 821 }
758 822
823 /* info->control is valid as long as we don't update info->status */
824 vif = info->control.vif;
825 wlvif = wl12xx_vif_to_data(vif);
826
759 /* update the TX status info */ 827 /* update the TX status info */
760 if (result->status == TX_SUCCESS) { 828 if (result->status == TX_SUCCESS) {
761 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 829 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
762 info->flags |= IEEE80211_TX_STAT_ACK; 830 info->flags |= IEEE80211_TX_STAT_ACK;
763 rate = wl1271_rate_to_idx(result->rate_class_index, wl->band); 831 rate = wl1271_rate_to_idx(result->rate_class_index,
832 wlvif->band);
764 retries = result->ack_failures; 833 retries = result->ack_failures;
765 } else if (result->status == TX_RETRY_EXCEEDED) { 834 } else if (result->status == TX_RETRY_EXCEEDED) {
766 wl->stats.excessive_retries++; 835 wl->stats.excessive_retries++;
@@ -783,14 +852,14 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
783 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP || 852 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
784 info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) { 853 info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
785 u8 fw_lsb = result->tx_security_sequence_number_lsb; 854 u8 fw_lsb = result->tx_security_sequence_number_lsb;
786 u8 cur_lsb = wl->tx_security_last_seq_lsb; 855 u8 cur_lsb = wlvif->tx_security_last_seq_lsb;
787 856
788 /* 857 /*
789 * update security sequence number, taking care of potential 858 * update security sequence number, taking care of potential
790 * wrap-around 859 * wrap-around
791 */ 860 */
792 wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256; 861 wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
793 wl->tx_security_last_seq_lsb = fw_lsb; 862 wlvif->tx_security_last_seq_lsb = fw_lsb;
794 } 863 }
795 864
796 /* remove private header from packet */ 865 /* remove private header from packet */
@@ -886,39 +955,30 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
886} 955}
887 956
888/* caller must hold wl->mutex and TX must be stopped */ 957/* caller must hold wl->mutex and TX must be stopped */
889void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues) 958void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
890{ 959{
891 int i; 960 int i;
892 struct sk_buff *skb;
893 struct ieee80211_tx_info *info;
894 961
895 /* TX failure */ 962 /* TX failure */
896 if (wl->bss_type == BSS_TYPE_AP_BSS) { 963 for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
897 for (i = 0; i < AP_MAX_LINKS; i++) { 964 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
898 wl1271_free_sta(wl, i); 965 wl1271_free_sta(wl, wlvif, i);
899 wl1271_tx_reset_link_queues(wl, i); 966 else
900 wl->links[i].allocated_pkts = 0; 967 wlvif->sta.ba_rx_bitmap = 0;
901 wl->links[i].prev_freed_pkts = 0;
902 }
903
904 wl->last_tx_hlid = 0;
905 } else {
906 for (i = 0; i < NUM_TX_QUEUES; i++) {
907 while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
908 wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
909 skb);
910
911 if (!wl12xx_is_dummy_packet(wl, skb)) {
912 info = IEEE80211_SKB_CB(skb);
913 info->status.rates[0].idx = -1;
914 info->status.rates[0].count = 0;
915 ieee80211_tx_status_ni(wl->hw, skb);
916 }
917 }
918 }
919 968
920 wl->ba_rx_bitmap = 0; 969 wl1271_tx_reset_link_queues(wl, i);
970 wl->links[i].allocated_pkts = 0;
971 wl->links[i].prev_freed_pkts = 0;
921 } 972 }
973 wlvif->last_tx_hlid = 0;
974
975}
976/* caller must hold wl->mutex and TX must be stopped */
977void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
978{
979 int i;
980 struct sk_buff *skb;
981 struct ieee80211_tx_info *info;
922 982
923 for (i = 0; i < NUM_TX_QUEUES; i++) 983 for (i = 0; i < NUM_TX_QUEUES; i++)
924 wl->tx_queue_count[i] = 0; 984 wl->tx_queue_count[i] = 0;
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index dc4f09adf088..2dbb24e6d541 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -206,18 +206,23 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
206void wl1271_tx_work(struct work_struct *work); 206void wl1271_tx_work(struct work_struct *work);
207void wl1271_tx_work_locked(struct wl1271 *wl); 207void wl1271_tx_work_locked(struct wl1271 *wl);
208void wl1271_tx_complete(struct wl1271 *wl); 208void wl1271_tx_complete(struct wl1271 *wl);
209void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues); 209void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
210void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
210void wl1271_tx_flush(struct wl1271 *wl); 211void wl1271_tx_flush(struct wl1271 *wl);
211u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 212u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
212u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, 213u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
213 enum ieee80211_band rate_band); 214 enum ieee80211_band rate_band);
214u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set); 215u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
215u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb); 216u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
217 struct sk_buff *skb);
218u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
219 struct sk_buff *skb);
216void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid); 220void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
217void wl1271_handle_tx_low_watermark(struct wl1271 *wl); 221void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
218bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb); 222bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
223void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
219 224
220/* from main.c */ 225/* from main.c */
221void wl1271_free_sta(struct wl1271 *wl, u8 hlid); 226void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
222 227
223#endif 228#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 1ec90fc7505e..d21f71ff6f64 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -35,83 +35,6 @@
35#include "conf.h" 35#include "conf.h"
36#include "ini.h" 36#include "ini.h"
37 37
38#define DRIVER_NAME "wl1271"
39#define DRIVER_PREFIX DRIVER_NAME ": "
40
41/*
42 * FW versions support BA 11n
43 * versions marks x.x.x.50-60.x
44 */
45#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50
46#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60
47
48enum {
49 DEBUG_NONE = 0,
50 DEBUG_IRQ = BIT(0),
51 DEBUG_SPI = BIT(1),
52 DEBUG_BOOT = BIT(2),
53 DEBUG_MAILBOX = BIT(3),
54 DEBUG_TESTMODE = BIT(4),
55 DEBUG_EVENT = BIT(5),
56 DEBUG_TX = BIT(6),
57 DEBUG_RX = BIT(7),
58 DEBUG_SCAN = BIT(8),
59 DEBUG_CRYPT = BIT(9),
60 DEBUG_PSM = BIT(10),
61 DEBUG_MAC80211 = BIT(11),
62 DEBUG_CMD = BIT(12),
63 DEBUG_ACX = BIT(13),
64 DEBUG_SDIO = BIT(14),
65 DEBUG_FILTERS = BIT(15),
66 DEBUG_ADHOC = BIT(16),
67 DEBUG_AP = BIT(17),
68 DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
69 DEBUG_ALL = ~0,
70};
71
72extern u32 wl12xx_debug_level;
73
74#define DEBUG_DUMP_LIMIT 1024
75
76#define wl1271_error(fmt, arg...) \
77 pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
78
79#define wl1271_warning(fmt, arg...) \
80 pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
81
82#define wl1271_notice(fmt, arg...) \
83 pr_info(DRIVER_PREFIX fmt "\n", ##arg)
84
85#define wl1271_info(fmt, arg...) \
86 pr_info(DRIVER_PREFIX fmt "\n", ##arg)
87
88#define wl1271_debug(level, fmt, arg...) \
89 do { \
90 if (level & wl12xx_debug_level) \
91 pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
92 } while (0)
93
94/* TODO: use pr_debug_hex_dump when it will be available */
95#define wl1271_dump(level, prefix, buf, len) \
96 do { \
97 if (level & wl12xx_debug_level) \
98 print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
99 DUMP_PREFIX_OFFSET, 16, 1, \
100 buf, \
101 min_t(size_t, len, DEBUG_DUMP_LIMIT), \
102 0); \
103 } while (0)
104
105#define wl1271_dump_ascii(level, prefix, buf, len) \
106 do { \
107 if (level & wl12xx_debug_level) \
108 print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
109 DUMP_PREFIX_OFFSET, 16, 1, \
110 buf, \
111 min_t(size_t, len, DEBUG_DUMP_LIMIT), \
112 true); \
113 } while (0)
114
115#define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin" 38#define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
116#define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin" 39#define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"
117 40
@@ -142,16 +65,12 @@ extern u32 wl12xx_debug_level;
142#define WL12XX_INVALID_ROLE_ID 0xff 65#define WL12XX_INVALID_ROLE_ID 0xff
143#define WL12XX_INVALID_LINK_ID 0xff 66#define WL12XX_INVALID_LINK_ID 0xff
144 67
68#define WL12XX_MAX_RATE_POLICIES 16
69
145/* Defined by FW as 0. Will not be freed or allocated. */ 70/* Defined by FW as 0. Will not be freed or allocated. */
146#define WL12XX_SYSTEM_HLID 0 71#define WL12XX_SYSTEM_HLID 0
147 72
148/* 73/*
149 * TODO: we currently don't support multirole. remove
150 * this constant from the code when we do.
151 */
152#define WL1271_AP_STA_HLID_START 3
153
154/*
155 * When in AP-mode, we allow (at least) this number of packets 74 * When in AP-mode, we allow (at least) this number of packets
156 * to be transmitted to FW for a STA in PS-mode. Only when packets are 75 * to be transmitted to FW for a STA in PS-mode. Only when packets are
157 * present in the FW buffers it will wake the sleeping STA. We want to put 76 * present in the FW buffers it will wake the sleeping STA. We want to put
@@ -236,13 +155,6 @@ struct wl1271_stats {
236 155
237#define AP_MAX_STATIONS 8 156#define AP_MAX_STATIONS 8
238 157
239/* Broadcast and Global links + system link + links to stations */
240/*
241 * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all
242 * the places that use this.
243 */
244#define AP_MAX_LINKS (AP_MAX_STATIONS + WL1271_AP_STA_HLID_START)
245
246/* FW status registers */ 158/* FW status registers */
247struct wl12xx_fw_status { 159struct wl12xx_fw_status {
248 __le32 intr; 160 __le32 intr;
@@ -299,17 +211,14 @@ struct wl1271_scan {
299}; 211};
300 212
301struct wl1271_if_operations { 213struct wl1271_if_operations {
302 void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len, 214 void (*read)(struct device *child, int addr, void *buf, size_t len,
303 bool fixed); 215 bool fixed);
304 void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len, 216 void (*write)(struct device *child, int addr, void *buf, size_t len,
305 bool fixed); 217 bool fixed);
306 void (*reset)(struct wl1271 *wl); 218 void (*reset)(struct device *child);
307 void (*init)(struct wl1271 *wl); 219 void (*init)(struct device *child);
308 int (*power)(struct wl1271 *wl, bool enable); 220 int (*power)(struct device *child, bool enable);
309 struct device* (*dev)(struct wl1271 *wl); 221 void (*set_block_size) (struct device *child, unsigned int blksz);
310 void (*enable_irq)(struct wl1271 *wl);
311 void (*disable_irq)(struct wl1271 *wl);
312 void (*set_block_size) (struct wl1271 *wl, unsigned int blksz);
313}; 222};
314 223
315#define MAX_NUM_KEYS 14 224#define MAX_NUM_KEYS 14
@@ -326,29 +235,33 @@ struct wl1271_ap_key {
326}; 235};
327 236
328enum wl12xx_flags { 237enum wl12xx_flags {
329 WL1271_FLAG_STA_ASSOCIATED,
330 WL1271_FLAG_IBSS_JOINED,
331 WL1271_FLAG_GPIO_POWER, 238 WL1271_FLAG_GPIO_POWER,
332 WL1271_FLAG_TX_QUEUE_STOPPED, 239 WL1271_FLAG_TX_QUEUE_STOPPED,
333 WL1271_FLAG_TX_PENDING, 240 WL1271_FLAG_TX_PENDING,
334 WL1271_FLAG_IN_ELP, 241 WL1271_FLAG_IN_ELP,
335 WL1271_FLAG_ELP_REQUESTED, 242 WL1271_FLAG_ELP_REQUESTED,
336 WL1271_FLAG_PSM,
337 WL1271_FLAG_PSM_REQUESTED,
338 WL1271_FLAG_IRQ_RUNNING, 243 WL1271_FLAG_IRQ_RUNNING,
339 WL1271_FLAG_IDLE, 244 WL1271_FLAG_IDLE,
340 WL1271_FLAG_PSPOLL_FAILURE,
341 WL1271_FLAG_STA_STATE_SENT,
342 WL1271_FLAG_FW_TX_BUSY, 245 WL1271_FLAG_FW_TX_BUSY,
343 WL1271_FLAG_AP_STARTED,
344 WL1271_FLAG_IF_INITIALIZED,
345 WL1271_FLAG_DUMMY_PACKET_PENDING, 246 WL1271_FLAG_DUMMY_PACKET_PENDING,
346 WL1271_FLAG_SUSPENDED, 247 WL1271_FLAG_SUSPENDED,
347 WL1271_FLAG_PENDING_WORK, 248 WL1271_FLAG_PENDING_WORK,
348 WL1271_FLAG_SOFT_GEMINI, 249 WL1271_FLAG_SOFT_GEMINI,
349 WL1271_FLAG_RX_STREAMING_STARTED,
350 WL1271_FLAG_RECOVERY_IN_PROGRESS, 250 WL1271_FLAG_RECOVERY_IN_PROGRESS,
351 WL1271_FLAG_CS_PROGRESS, 251};
252
253enum wl12xx_vif_flags {
254 WLVIF_FLAG_INITIALIZED,
255 WLVIF_FLAG_STA_ASSOCIATED,
256 WLVIF_FLAG_IBSS_JOINED,
257 WLVIF_FLAG_AP_STARTED,
258 WLVIF_FLAG_PSM,
259 WLVIF_FLAG_PSM_REQUESTED,
260 WLVIF_FLAG_STA_STATE_SENT,
261 WLVIF_FLAG_RX_STREAMING_STARTED,
262 WLVIF_FLAG_PSPOLL_FAILURE,
263 WLVIF_FLAG_CS_PROGRESS,
264 WLVIF_FLAG_AP_PROBE_RESP_SET,
352}; 265};
353 266
354struct wl1271_link { 267struct wl1271_link {
@@ -366,10 +279,11 @@ struct wl1271_link {
366}; 279};
367 280
368struct wl1271 { 281struct wl1271 {
369 struct platform_device *plat_dev;
370 struct ieee80211_hw *hw; 282 struct ieee80211_hw *hw;
371 bool mac80211_registered; 283 bool mac80211_registered;
372 284
285 struct device *dev;
286
373 void *if_priv; 287 void *if_priv;
374 288
375 struct wl1271_if_operations *if_ops; 289 struct wl1271_if_operations *if_ops;
@@ -399,25 +313,20 @@ struct wl1271 {
399 313
400 s8 hw_pg_ver; 314 s8 hw_pg_ver;
401 315
402 u8 bssid[ETH_ALEN];
403 u8 mac_addr[ETH_ALEN]; 316 u8 mac_addr[ETH_ALEN];
404 u8 bss_type;
405 u8 set_bss_type;
406 u8 p2p; /* we are using p2p role */
407 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
408 u8 ssid_len;
409 int channel; 317 int channel;
410 u8 role_id;
411 u8 dev_role_id;
412 u8 system_hlid; 318 u8 system_hlid;
413 u8 sta_hlid;
414 u8 dev_hlid;
415 u8 ap_global_hlid;
416 u8 ap_bcast_hlid;
417 319
418 unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)]; 320 unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
419 unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; 321 unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
420 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)]; 322 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
323 unsigned long rate_policies_map[
324 BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
325
326 struct list_head wlvif_list;
327
328 u8 sta_count;
329 u8 ap_count;
421 330
422 struct wl1271_acx_mem_map *target_mem_map; 331 struct wl1271_acx_mem_map *target_mem_map;
423 332
@@ -440,11 +349,7 @@ struct wl1271 {
440 /* Time-offset between host and chipset clocks */ 349 /* Time-offset between host and chipset clocks */
441 s64 time_offset; 350 s64 time_offset;
442 351
443 /* Session counter for the chipset */
444 int session_counter;
445
446 /* Frames scheduled for transmission, not handled yet */ 352 /* Frames scheduled for transmission, not handled yet */
447 struct sk_buff_head tx_queue[NUM_TX_QUEUES];
448 int tx_queue_count[NUM_TX_QUEUES]; 353 int tx_queue_count[NUM_TX_QUEUES];
449 long stopped_queues_map; 354 long stopped_queues_map;
450 355
@@ -462,17 +367,6 @@ struct wl1271 {
462 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS]; 367 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
463 int tx_frames_cnt; 368 int tx_frames_cnt;
464 369
465 /*
466 * Security sequence number
467 * bits 0-15: lower 16 bits part of sequence number
468 * bits 16-47: higher 32 bits part of sequence number
469 * bits 48-63: not in use
470 */
471 u64 tx_security_seq;
472
473 /* 8 bits of the last sequence number in use */
474 u8 tx_security_last_seq_lsb;
475
476 /* FW Rx counter */ 370 /* FW Rx counter */
477 u32 rx_counter; 371 u32 rx_counter;
478 372
@@ -507,59 +401,21 @@ struct wl1271 {
507 u32 mbox_ptr[2]; 401 u32 mbox_ptr[2];
508 402
509 /* Are we currently scanning */ 403 /* Are we currently scanning */
404 struct ieee80211_vif *scan_vif;
510 struct wl1271_scan scan; 405 struct wl1271_scan scan;
511 struct delayed_work scan_complete_work; 406 struct delayed_work scan_complete_work;
512 407
513 bool sched_scanning; 408 bool sched_scanning;
514 409
515 /* probe-req template for the current AP */
516 struct sk_buff *probereq;
517
518 /* Our association ID */
519 u16 aid;
520
521 /*
522 * currently configured rate set:
523 * bits 0-15 - 802.11abg rates
524 * bits 16-23 - 802.11n MCS index mask
525 * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
526 */
527 u32 basic_rate_set;
528 u32 basic_rate;
529 u32 rate_set;
530 u32 bitrate_masks[IEEE80211_NUM_BANDS];
531
532 /* The current band */ 410 /* The current band */
533 enum ieee80211_band band; 411 enum ieee80211_band band;
534 412
535 /* Beaconing interval (needed for ad-hoc) */
536 u32 beacon_int;
537
538 /* Default key (for WEP) */
539 u32 default_key;
540
541 /* Rx Streaming */
542 struct work_struct rx_streaming_enable_work;
543 struct work_struct rx_streaming_disable_work;
544 struct timer_list rx_streaming_timer;
545
546 struct completion *elp_compl; 413 struct completion *elp_compl;
547 struct completion *ps_compl;
548 struct delayed_work elp_work; 414 struct delayed_work elp_work;
549 struct delayed_work pspoll_work;
550
551 /* counter for ps-poll delivery failures */
552 int ps_poll_failures;
553
554 /* retry counter for PSM entries */
555 u8 psm_entry_retry;
556 415
557 /* in dBm */ 416 /* in dBm */
558 int power_level; 417 int power_level;
559 418
560 int rssi_thold;
561 int last_rssi_event;
562
563 struct wl1271_stats stats; 419 struct wl1271_stats stats;
564 420
565 __le32 buffer_32; 421 __le32 buffer_32;
@@ -583,20 +439,9 @@ struct wl1271 {
583 /* Most recently reported noise in dBm */ 439 /* Most recently reported noise in dBm */
584 s8 noise; 440 s8 noise;
585 441
586 /* map for HLIDs of associated stations - when operating in AP mode */
587 unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
588
589 /* recoreded keys for AP-mode - set here before AP startup */
590 struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
591
592 /* bands supported by this instance of wl12xx */ 442 /* bands supported by this instance of wl12xx */
593 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 443 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
594 444
595 /* RX BA constraint value */
596 bool ba_support;
597 u8 ba_rx_bitmap;
598 bool ba_allowed;
599
600 int tcxo_clock; 445 int tcxo_clock;
601 446
602 /* 447 /*
@@ -610,10 +455,7 @@ struct wl1271 {
610 * AP-mode - links indexed by HLID. The global and broadcast links 455 * AP-mode - links indexed by HLID. The global and broadcast links
611 * are always active. 456 * are always active.
612 */ 457 */
613 struct wl1271_link links[AP_MAX_LINKS]; 458 struct wl1271_link links[WL12XX_MAX_LINKS];
614
615 /* the hlid of the link where the last transmitted skb came from */
616 int last_tx_hlid;
617 459
618 /* AP-mode - a bitmap of links currently in PS mode according to FW */ 460 /* AP-mode - a bitmap of links currently in PS mode according to FW */
619 u32 ap_fw_ps_map; 461 u32 ap_fw_ps_map;
@@ -632,21 +474,173 @@ struct wl1271 {
632 474
633 /* AP-mode - number of currently connected stations */ 475 /* AP-mode - number of currently connected stations */
634 int active_sta_count; 476 int active_sta_count;
477
478 /* last wlvif we transmitted from */
479 struct wl12xx_vif *last_wlvif;
635}; 480};
636 481
637struct wl1271_station { 482struct wl1271_station {
638 u8 hlid; 483 u8 hlid;
639}; 484};
640 485
486struct wl12xx_vif {
487 struct wl1271 *wl;
488 struct list_head list;
489 unsigned long flags;
490 u8 bss_type;
491 u8 p2p; /* we are using p2p role */
492 u8 role_id;
493
494 /* sta/ibss specific */
495 u8 dev_role_id;
496 u8 dev_hlid;
497
498 union {
499 struct {
500 u8 hlid;
501 u8 ba_rx_bitmap;
502
503 u8 basic_rate_idx;
504 u8 ap_rate_idx;
505 u8 p2p_rate_idx;
506 } sta;
507 struct {
508 u8 global_hlid;
509 u8 bcast_hlid;
510
511 /* HLIDs bitmap of associated stations */
512 unsigned long sta_hlid_map[BITS_TO_LONGS(
513 WL12XX_MAX_LINKS)];
514
515 /* recoreded keys - set here before AP startup */
516 struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
517
518 u8 mgmt_rate_idx;
519 u8 bcast_rate_idx;
520 u8 ucast_rate_idx[CONF_TX_MAX_AC_COUNT];
521 } ap;
522 };
523
524 /* the hlid of the last transmitted skb */
525 int last_tx_hlid;
526
527 unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
528
529 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
530 u8 ssid_len;
531
532 /* The current band */
533 enum ieee80211_band band;
534 int channel;
535
536 u32 bitrate_masks[IEEE80211_NUM_BANDS];
537 u32 basic_rate_set;
538
539 /*
540 * currently configured rate set:
541 * bits 0-15 - 802.11abg rates
542 * bits 16-23 - 802.11n MCS index mask
543 * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
544 */
545 u32 basic_rate;
546 u32 rate_set;
547
548 /* probe-req template for the current AP */
549 struct sk_buff *probereq;
550
551 /* Beaconing interval (needed for ad-hoc) */
552 u32 beacon_int;
553
554 /* Default key (for WEP) */
555 u32 default_key;
556
557 /* Our association ID */
558 u16 aid;
559
560 /* Session counter for the chipset */
561 int session_counter;
562
563 struct completion *ps_compl;
564 struct delayed_work pspoll_work;
565
566 /* counter for ps-poll delivery failures */
567 int ps_poll_failures;
568
569 /* retry counter for PSM entries */
570 u8 psm_entry_retry;
571
572 /* in dBm */
573 int power_level;
574
575 int rssi_thold;
576 int last_rssi_event;
577
578 /* RX BA constraint value */
579 bool ba_support;
580 bool ba_allowed;
581
582 /* Rx Streaming */
583 struct work_struct rx_streaming_enable_work;
584 struct work_struct rx_streaming_disable_work;
585 struct timer_list rx_streaming_timer;
586
587 /*
588 * This struct must be last!
589 * data that has to be saved acrossed reconfigs (e.g. recovery)
590 * should be declared in this struct.
591 */
592 struct {
593 u8 persistent[0];
594 /*
595 * Security sequence number
596 * bits 0-15: lower 16 bits part of sequence number
597 * bits 16-47: higher 32 bits part of sequence number
598 * bits 48-63: not in use
599 */
600 u64 tx_security_seq;
601
602 /* 8 bits of the last sequence number in use */
603 u8 tx_security_last_seq_lsb;
604 };
605};
606
607static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
608{
609 return (struct wl12xx_vif *)vif->drv_priv;
610}
611
612static inline
613struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
614{
615 return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
616}
617
618#define wl12xx_for_each_wlvif(wl, wlvif) \
619 list_for_each_entry(wlvif, &wl->wlvif_list, list)
620
621#define wl12xx_for_each_wlvif_continue(wl, wlvif) \
622 list_for_each_entry_continue(wlvif, &wl->wlvif_list, list)
623
624#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type) \
625 wl12xx_for_each_wlvif(wl, wlvif) \
626 if (wlvif->bss_type == _bss_type)
627
628#define wl12xx_for_each_wlvif_sta(wl, wlvif) \
629 wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS)
630
631#define wl12xx_for_each_wlvif_ap(wl, wlvif) \
632 wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)
633
641int wl1271_plt_start(struct wl1271 *wl); 634int wl1271_plt_start(struct wl1271 *wl);
642int wl1271_plt_stop(struct wl1271 *wl); 635int wl1271_plt_stop(struct wl1271 *wl);
643int wl1271_recalc_rx_streaming(struct wl1271 *wl); 636int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
644void wl12xx_queue_recovery_work(struct wl1271 *wl); 637void wl12xx_queue_recovery_work(struct wl1271 *wl);
645size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen); 638size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
646 639
647#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */ 640#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
648 641
649#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */ 642#define SESSION_COUNTER_MAX 6 /* maximum value for the session counter */
643#define SESSION_COUNTER_INVALID 7 /* used with dummy_packet */
650 644
651#define WL1271_DEFAULT_POWER_LEVEL 0 645#define WL1271_DEFAULT_POWER_LEVEL 0
652 646
@@ -669,8 +663,8 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
669/* Each RX/TX transaction requires an end-of-transaction transfer */ 663/* Each RX/TX transaction requires an end-of-transaction transfer */
670#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0) 664#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
671 665
672/* WL128X requires aggregated packets to be aligned to the SDIO block size */ 666/* wl127x and SPI don't support SDIO block size alignment */
673#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2) 667#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT BIT(2)
674 668
675/* Older firmwares did not implement the FW logger over bus feature */ 669/* Older firmwares did not implement the FW logger over bus feature */
676#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4) 670#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4)
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index f7971d3b0898..8f0ffaf62309 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -116,11 +116,6 @@ struct wl12xx_ps_poll_template {
116 u8 ta[ETH_ALEN]; 116 u8 ta[ETH_ALEN];
117} __packed; 117} __packed;
118 118
119struct wl12xx_qos_null_data_template {
120 struct ieee80211_header header;
121 __le16 qos_ctl;
122} __packed;
123
124struct wl12xx_arp_rsp_template { 119struct wl12xx_arp_rsp_template {
125 struct ieee80211_hdr_3addr hdr; 120 struct ieee80211_hdr_3addr hdr;
126 121
diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
index 973b11060a8f..3c96b332184e 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
+++ b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
@@ -2,7 +2,7 @@
2#include <linux/err.h> 2#include <linux/err.h>
3#include <linux/wl12xx.h> 3#include <linux/wl12xx.h>
4 4
5static const struct wl12xx_platform_data *platform_data; 5static struct wl12xx_platform_data *platform_data;
6 6
7int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data) 7int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
8{ 8{
@@ -18,7 +18,7 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
18 return 0; 18 return 0;
19} 19}
20 20
21const struct wl12xx_platform_data *wl12xx_get_platform_data(void) 21struct wl12xx_platform_data *wl12xx_get_platform_data(void)
22{ 22{
23 if (!platform_data) 23 if (!platform_data)
24 return ERR_PTR(-ENODEV); 24 return ERR_PTR(-ENODEV);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index cf0d69dd7be5..785bdbe38f2a 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -28,6 +28,7 @@
28#include <linux/skbuff.h> 28#include <linux/skbuff.h>
29#include <linux/usb.h> 29#include <linux/usb.h>
30#include <linux/workqueue.h> 30#include <linux/workqueue.h>
31#include <linux/module.h>
31#include <net/mac80211.h> 32#include <net/mac80211.h>
32#include <asm/unaligned.h> 33#include <asm/unaligned.h>
33 34
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 161f207786a4..94b79c3338c4 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -58,10 +58,6 @@ struct xenvif {
58 u8 fe_dev_addr[6]; 58 u8 fe_dev_addr[6];
59 59
60 /* Physical parameters of the comms window. */ 60 /* Physical parameters of the comms window. */
61 grant_handle_t tx_shmem_handle;
62 grant_ref_t tx_shmem_ref;
63 grant_handle_t rx_shmem_handle;
64 grant_ref_t rx_shmem_ref;
65 unsigned int irq; 61 unsigned int irq;
66 62
67 /* List of frontends to notify after a batch of frames sent. */ 63 /* List of frontends to notify after a batch of frames sent. */
@@ -70,8 +66,6 @@ struct xenvif {
70 /* The shared rings and indexes. */ 66 /* The shared rings and indexes. */
71 struct xen_netif_tx_back_ring tx; 67 struct xen_netif_tx_back_ring tx;
72 struct xen_netif_rx_back_ring rx; 68 struct xen_netif_rx_back_ring rx;
73 struct vm_struct *tx_comms_area;
74 struct vm_struct *rx_comms_area;
75 69
76 /* Frontend feature information. */ 70 /* Frontend feature information. */
77 u8 can_sg:1; 71 u8 can_sg:1;
@@ -106,6 +100,11 @@ struct xenvif {
106 wait_queue_head_t waiting_to_free; 100 wait_queue_head_t waiting_to_free;
107}; 101};
108 102
103static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
104{
105 return to_xenbus_device(vif->dev->dev.parent);
106}
107
109#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) 108#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
110#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) 109#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
111 110
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d5508957200e..0cb594c86090 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1589,88 +1589,42 @@ static int xen_netbk_kthread(void *data)
1589 1589
1590void xen_netbk_unmap_frontend_rings(struct xenvif *vif) 1590void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
1591{ 1591{
1592 struct gnttab_unmap_grant_ref op; 1592 if (vif->tx.sring)
1593 1593 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1594 if (vif->tx.sring) { 1594 vif->tx.sring);
1595 gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr, 1595 if (vif->rx.sring)
1596 GNTMAP_host_map, vif->tx_shmem_handle); 1596 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1597 1597 vif->rx.sring);
1598 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
1599 BUG();
1600 }
1601
1602 if (vif->rx.sring) {
1603 gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
1604 GNTMAP_host_map, vif->rx_shmem_handle);
1605
1606 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
1607 BUG();
1608 }
1609 if (vif->rx_comms_area)
1610 free_vm_area(vif->rx_comms_area);
1611 if (vif->tx_comms_area)
1612 free_vm_area(vif->tx_comms_area);
1613} 1598}
1614 1599
1615int xen_netbk_map_frontend_rings(struct xenvif *vif, 1600int xen_netbk_map_frontend_rings(struct xenvif *vif,
1616 grant_ref_t tx_ring_ref, 1601 grant_ref_t tx_ring_ref,
1617 grant_ref_t rx_ring_ref) 1602 grant_ref_t rx_ring_ref)
1618{ 1603{
1619 struct gnttab_map_grant_ref op; 1604 void *addr;
1620 struct xen_netif_tx_sring *txs; 1605 struct xen_netif_tx_sring *txs;
1621 struct xen_netif_rx_sring *rxs; 1606 struct xen_netif_rx_sring *rxs;
1622 1607
1623 int err = -ENOMEM; 1608 int err = -ENOMEM;
1624 1609
1625 vif->tx_comms_area = alloc_vm_area(PAGE_SIZE); 1610 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1626 if (vif->tx_comms_area == NULL) 1611 tx_ring_ref, &addr);
1612 if (err)
1627 goto err; 1613 goto err;
1628 1614
1629 vif->rx_comms_area = alloc_vm_area(PAGE_SIZE); 1615 txs = (struct xen_netif_tx_sring *)addr;
1630 if (vif->rx_comms_area == NULL)
1631 goto err;
1632
1633 gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
1634 GNTMAP_host_map, tx_ring_ref, vif->domid);
1635
1636 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
1637 BUG();
1638
1639 if (op.status) {
1640 netdev_warn(vif->dev,
1641 "failed to map tx ring. err=%d status=%d\n",
1642 err, op.status);
1643 err = op.status;
1644 goto err;
1645 }
1646
1647 vif->tx_shmem_ref = tx_ring_ref;
1648 vif->tx_shmem_handle = op.handle;
1649
1650 txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
1651 BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE); 1616 BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
1652 1617
1653 gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr, 1618 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1654 GNTMAP_host_map, rx_ring_ref, vif->domid); 1619 rx_ring_ref, &addr);
1655 1620 if (err)
1656 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
1657 BUG();
1658
1659 if (op.status) {
1660 netdev_warn(vif->dev,
1661 "failed to map rx ring. err=%d status=%d\n",
1662 err, op.status);
1663 err = op.status;
1664 goto err; 1621 goto err;
1665 }
1666
1667 vif->rx_shmem_ref = rx_ring_ref;
1668 vif->rx_shmem_handle = op.handle;
1669 vif->rx_req_cons_peek = 0;
1670 1622
1671 rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr; 1623 rxs = (struct xen_netif_rx_sring *)addr;
1672 BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE); 1624 BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
1673 1625
1626 vif->rx_req_cons_peek = 0;
1627
1674 return 0; 1628 return 0;
1675 1629
1676err: 1630err: